Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_virtio.c | 5
-rw-r--r--  net/ax25/af_ax25.c | 12
-rw-r--r--  net/ax25/ax25_dev.c | 2
-rw-r--r--  net/ax25/ax25_ds_timer.c | 12
-rw-r--r--  net/ax25/ax25_out.c | 13
-rw-r--r--  net/ax25/ax25_route.c | 28
-rw-r--r--  net/ax25/ax25_timer.c | 60
-rw-r--r--  net/bluetooth/hci_conn.c | 1
-rw-r--r--  net/bluetooth/hci_core.c | 5
-rw-r--r--  net/bluetooth/hci_sysfs.c | 24
-rw-r--r--  net/bridge/netfilter/ebt_dnat.c | 4
-rw-r--r--  net/bridge/netfilter/ebt_redirect.c | 4
-rw-r--r--  net/bridge/netfilter/ebt_snat.c | 4
-rw-r--r--  net/core/dev.c | 6
-rw-r--r--  net/core/neighbour.c | 7
-rw-r--r--  net/core/rtnetlink.c | 27
-rw-r--r--  net/core/skbuff.c | 3
-rw-r--r--  net/ipv4/ah4.c | 2
-rw-r--r--  net/ipv4/esp4.c | 5
-rw-r--r--  net/ipv4/fib_hash.c | 10
-rw-r--r--  net/ipv4/fib_trie.c | 99
-rw-r--r--  net/ipv4/inet_hashtables.c | 3
-rw-r--r--  net/ipv4/ip_gre.c | 12
-rw-r--r--  net/ipv4/ip_sockglue.c | 5
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/ipip.c | 12
-rw-r--r--  net/ipv4/netfilter/arpt_mangle.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 12
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 8
-rw-r--r--  net/ipv6/ah6.c | 2
-rw-r--r--  net/ipv6/esp6.c | 5
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 6
-rw-r--r--  net/ipv6/ip6_tunnel.c | 16
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 10
-rw-r--r--  net/ipv6/sit.c | 12
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/key/af_key.c | 1
-rw-r--r--  net/mac80211/ieee80211.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 10
-rw-r--r--  net/netfilter/xt_SECMARK.c | 2
-rw-r--r--  net/netfilter/xt_hashlimit.c | 3
-rw-r--r--  net/netfilter/xt_iprange.c | 2
-rw-r--r--  net/netfilter/xt_u32.c | 11
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 45
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 8
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 81
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 122
-rw-r--r--  net/netlabel/netlabel_user.c | 5
-rw-r--r--  net/netlink/genetlink.c | 6
-rw-r--r--  net/rfkill/rfkill.c | 2
-rw-r--r--  net/rxrpc/ar-accept.c | 3
-rw-r--r--  net/rxrpc/ar-ack.c | 3
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/socket.c | 3
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 5
-rw-r--r--  net/sunrpc/clnt.c | 4
-rw-r--r--  net/sunrpc/rpc_pipe.c | 7
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 2
-rw-r--r--  net/sunrpc/sched.c | 255
-rw-r--r--  net/sunrpc/svcsock.c | 6
-rw-r--r--  net/sunrpc/xprt.c | 45
-rw-r--r--  net/sunrpc/xprtsock.c | 7
-rw-r--r--  net/unix/af_unix.c | 26
-rw-r--r--  net/xfrm/Kconfig | 2
-rw-r--r--  net/xfrm/xfrm_input.c | 4
-rw-r--r--  net/xfrm/xfrm_output.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 20
-rw-r--r--  net/xfrm/xfrm_user.c | 1
71 files changed, 561 insertions, 587 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 0117b9fb8480..de7a9f532edc 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -110,7 +110,7 @@ static struct p9_req_t *p9_lookup_tag(struct virtio_chan *c, u16 tag)
}
for (count = old_max; count < c->max_tag; count++) {
c->reqs[count].status = REQ_STATUS_IDLE;
- c->reqs[count].wq = kmalloc(sizeof(wait_queue_t),
+ c->reqs[count].wq = kmalloc(sizeof(wait_queue_head_t),
GFP_ATOMIC);
if (!c->reqs[count].wq) {
printk(KERN_ERR "Couldn't grow tag array\n");
@@ -183,8 +183,7 @@ pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
sg_set_buf(&sg[index++], data, s);
count -= s;
data += s;
- if (index > limit)
- BUG();
+ BUG_ON(index > limit);
}
return index-start;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 8fc64e3150a2..48bfcc741f25 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -510,11 +510,7 @@ ax25_cb *ax25_create_cb(void)
skb_queue_head_init(&ax25->ack_queue);
skb_queue_head_init(&ax25->reseq_queue);
- init_timer(&ax25->timer);
- init_timer(&ax25->t1timer);
- init_timer(&ax25->t2timer);
- init_timer(&ax25->t3timer);
- init_timer(&ax25->idletimer);
+ ax25_setup_timers(ax25);
ax25_fillin_cb(ax25, NULL);
@@ -1928,12 +1924,10 @@ static int ax25_info_show(struct seq_file *seq, void *v)
ax25->paclen);
if (ax25->sk != NULL) {
- bh_lock_sock(ax25->sk);
- seq_printf(seq," %d %d %ld\n",
+ seq_printf(seq, " %d %d %lu\n",
atomic_read(&ax25->sk->sk_wmem_alloc),
atomic_read(&ax25->sk->sk_rmem_alloc),
- ax25->sk->sk_socket != NULL ? SOCK_INODE(ax25->sk->sk_socket)->i_ino : 0L);
- bh_unlock_sock(ax25->sk);
+ sock_i_ino(ax25->sk));
} else {
seq_puts(seq, " * * *\n");
}
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 528c874d9828..a7a0e0c9698b 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -82,7 +82,7 @@ void ax25_dev_device_up(struct net_device *dev)
ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT;
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
- init_timer(&ax25_dev->dama.slave_timer);
+ ax25_ds_setup_timer(ax25_dev);
#endif
spin_lock_bh(&ax25_dev_lock);
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index c4e3b025d21c..2ce79df00680 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -40,13 +40,10 @@ static void ax25_ds_timeout(unsigned long);
* 1/10th of a second.
*/
-static void ax25_ds_add_timer(ax25_dev *ax25_dev)
+void ax25_ds_setup_timer(ax25_dev *ax25_dev)
{
- struct timer_list *t = &ax25_dev->dama.slave_timer;
- t->data = (unsigned long) ax25_dev;
- t->function = &ax25_ds_timeout;
- t->expires = jiffies + HZ;
- add_timer(t);
+ setup_timer(&ax25_dev->dama.slave_timer, ax25_ds_timeout,
+ (unsigned long)ax25_dev);
}
void ax25_ds_del_timer(ax25_dev *ax25_dev)
@@ -60,10 +57,9 @@ void ax25_ds_set_timer(ax25_dev *ax25_dev)
if (ax25_dev == NULL) /* paranoia */
return;
- del_timer(&ax25_dev->dama.slave_timer);
ax25_dev->dama.slave_timeout =
msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10;
- ax25_ds_add_timer(ax25_dev);
+ mod_timer(&ax25_dev->dama.slave_timer, jiffies + HZ);
}
/*
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 92b517af7260..bf706f83a5c9 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -117,6 +117,12 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
unsigned char *p;
int frontlen, len, fragno, ka9qfrag, first = 1;
+ if (paclen < 16) {
+ WARN_ON_ONCE(1);
+ kfree_skb(skb);
+ return;
+ }
+
if ((skb->len - 1) > paclen) {
if (*skb->data == AX25_P_TEXT) {
skb_pull(skb, 1); /* skip PID */
@@ -251,8 +257,6 @@ void ax25_kick(ax25_cb *ax25)
if (start == end)
return;
- ax25->vs = start;
-
/*
* Transmit data until either we're out of data to send or
* the window is full. Send a poll on the final I frame if
@@ -261,8 +265,13 @@ void ax25_kick(ax25_cb *ax25)
/*
* Dequeue the frame and copy it.
+ * Check for race with ax25_clear_queues().
*/
skb = skb_dequeue(&ax25->write_queue);
+ if (!skb)
+ return;
+
+ ax25->vs = start;
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 38c7f3087ec3..8672cd84fdf9 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -45,7 +45,7 @@ void ax25_rt_device_down(struct net_device *dev)
{
ax25_route *s, *t, *ax25_rt;
- write_lock(&ax25_route_lock);
+ write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
s = ax25_rt;
@@ -68,7 +68,7 @@ void ax25_rt_device_down(struct net_device *dev)
}
}
}
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
}
static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
@@ -82,7 +82,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
if (route->digi_count > AX25_MAX_DIGIS)
return -EINVAL;
- write_lock(&ax25_route_lock);
+ write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
@@ -92,7 +92,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
ax25_rt->digipeat = NULL;
if (route->digi_count != 0) {
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
return -ENOMEM;
}
ax25_rt->digipeat->lastrepeat = -1;
@@ -102,14 +102,14 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
ax25_rt->digipeat->calls[i] = route->digi_addr[i];
}
}
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
return 0;
}
ax25_rt = ax25_rt->next;
}
if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
return -ENOMEM;
}
@@ -120,7 +120,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
ax25_rt->ip_mode = ' ';
if (route->digi_count != 0) {
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
kfree(ax25_rt);
return -ENOMEM;
}
@@ -133,7 +133,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
}
ax25_rt->next = ax25_route_list;
ax25_route_list = ax25_rt;
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
return 0;
}
@@ -152,7 +152,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
return -EINVAL;
- write_lock(&ax25_route_lock);
+ write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
@@ -174,7 +174,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
}
}
}
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
return 0;
}
@@ -188,7 +188,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
if ((ax25_dev = ax25_addr_ax25dev(&rt_option->port_addr)) == NULL)
return -EINVAL;
- write_lock(&ax25_route_lock);
+ write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
@@ -216,7 +216,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
}
out:
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
return err;
}
@@ -492,7 +492,7 @@ void __exit ax25_rt_free(void)
{
ax25_route *s, *ax25_rt = ax25_route_list;
- write_lock(&ax25_route_lock);
+ write_lock_bh(&ax25_route_lock);
while (ax25_rt != NULL) {
s = ax25_rt;
ax25_rt = ax25_rt->next;
@@ -500,5 +500,5 @@ void __exit ax25_rt_free(void)
kfree(s->digipeat);
kfree(s);
}
- write_unlock(&ax25_route_lock);
+ write_unlock_bh(&ax25_route_lock);
}
diff --git a/net/ax25/ax25_timer.c b/net/ax25/ax25_timer.c
index 72594867fab6..db29ea71e80a 100644
--- a/net/ax25/ax25_timer.c
+++ b/net/ax25/ax25_timer.c
@@ -40,63 +40,45 @@ static void ax25_t2timer_expiry(unsigned long);
static void ax25_t3timer_expiry(unsigned long);
static void ax25_idletimer_expiry(unsigned long);
-void ax25_start_heartbeat(ax25_cb *ax25)
+void ax25_setup_timers(ax25_cb *ax25)
{
- del_timer(&ax25->timer);
-
- ax25->timer.data = (unsigned long)ax25;
- ax25->timer.function = &ax25_heartbeat_expiry;
- ax25->timer.expires = jiffies + 5 * HZ;
+ setup_timer(&ax25->timer, ax25_heartbeat_expiry, (unsigned long)ax25);
+ setup_timer(&ax25->t1timer, ax25_t1timer_expiry, (unsigned long)ax25);
+ setup_timer(&ax25->t2timer, ax25_t2timer_expiry, (unsigned long)ax25);
+ setup_timer(&ax25->t3timer, ax25_t3timer_expiry, (unsigned long)ax25);
+ setup_timer(&ax25->idletimer, ax25_idletimer_expiry,
+ (unsigned long)ax25);
+}
- add_timer(&ax25->timer);
+void ax25_start_heartbeat(ax25_cb *ax25)
+{
+ mod_timer(&ax25->timer, jiffies + 5 * HZ);
}
void ax25_start_t1timer(ax25_cb *ax25)
{
- del_timer(&ax25->t1timer);
-
- ax25->t1timer.data = (unsigned long)ax25;
- ax25->t1timer.function = &ax25_t1timer_expiry;
- ax25->t1timer.expires = jiffies + ax25->t1;
-
- add_timer(&ax25->t1timer);
+ mod_timer(&ax25->t1timer, jiffies + ax25->t1);
}
void ax25_start_t2timer(ax25_cb *ax25)
{
- del_timer(&ax25->t2timer);
-
- ax25->t2timer.data = (unsigned long)ax25;
- ax25->t2timer.function = &ax25_t2timer_expiry;
- ax25->t2timer.expires = jiffies + ax25->t2;
-
- add_timer(&ax25->t2timer);
+ mod_timer(&ax25->t2timer, jiffies + ax25->t2);
}
void ax25_start_t3timer(ax25_cb *ax25)
{
- del_timer(&ax25->t3timer);
-
- if (ax25->t3 > 0) {
- ax25->t3timer.data = (unsigned long)ax25;
- ax25->t3timer.function = &ax25_t3timer_expiry;
- ax25->t3timer.expires = jiffies + ax25->t3;
-
- add_timer(&ax25->t3timer);
- }
+ if (ax25->t3 > 0)
+ mod_timer(&ax25->t3timer, jiffies + ax25->t3);
+ else
+ del_timer(&ax25->t3timer);
}
void ax25_start_idletimer(ax25_cb *ax25)
{
- del_timer(&ax25->idletimer);
-
- if (ax25->idle > 0) {
- ax25->idletimer.data = (unsigned long)ax25;
- ax25->idletimer.function = &ax25_idletimer_expiry;
- ax25->idletimer.expires = jiffies + ax25->idle;
-
- add_timer(&ax25->idletimer);
- }
+ if (ax25->idle > 0)
+ mod_timer(&ax25->idletimer, jiffies + ax25->idle);
+ else
+ del_timer(&ax25->idletimer);
}
void ax25_stop_heartbeat(ax25_cb *ax25)
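
The ax25 changes above follow one idiom: one-time timer initialisation with setup_timer() when the control block is created, and mod_timer() to (re)arm, which replaces the open-coded del_timer()/assign-fields/add_timer() sequence. A minimal sketch of the idiom, using a hypothetical my_conn structure rather than ax25_cb:

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        struct my_conn {
                struct timer_list hb_timer;
        };

        static void my_heartbeat_expiry(unsigned long data)
        {
                struct my_conn *c = (struct my_conn *)data;
                /* handle the expiry; may re-arm via my_conn_start_heartbeat(c) */
        }

        static void my_conn_init(struct my_conn *c)
        {
                /* initialise once: bind the handler and its callback data */
                setup_timer(&c->hb_timer, my_heartbeat_expiry, (unsigned long)c);
        }

        static void my_conn_start_heartbeat(struct my_conn *c)
        {
                /* arms the timer, or moves its expiry if it is already pending */
                mod_timer(&c->hb_timer, jiffies + 5 * HZ);
        }
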
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5fc7be206f62..f8880261da0e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -260,7 +260,6 @@ int hci_conn_del(struct hci_conn *conn)
tasklet_enable(&hdev->tx_task);
skb_queue_purge(&conn->data_q);
hci_conn_del_sysfs(conn);
- hci_dev_put(hdev);
return 0;
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 372b0d3b75a8..930b58e7149a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -24,6 +24,7 @@
/* Bluetooth HCI core. */
+#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>
@@ -1321,7 +1322,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
- if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
+ if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
hci_acl_tx_to(hdev);
}
@@ -1543,7 +1544,7 @@ static void hci_cmd_task(unsigned long arg)
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
- if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
+ if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
}
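
Both hunks above replace open-coded jiffies arithmetic with time_after(), the <linux/jiffies.h> helper that compares tick counts via a signed difference and therefore stays correct across counter wraparound. A small illustrative sketch (the 45-second timeout and field name are placeholders, not taken from hci_core.c):

        #include <linux/jiffies.h>

        /* wrap-safe: true iff more than 45 seconds have elapsed since last_tx */
        static inline bool tx_timed_out(unsigned long last_tx)
        {
                return time_after(jiffies, last_tx + 45 * HZ);
        }
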
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index e13cf5ef144c..84360c117d4e 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -320,28 +320,34 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
queue_work(btaddconn, &conn->work);
}
+/*
+ * The rfcomm tty device will possibly retain even when conn
+ * is down, and sysfs doesn't support move zombie device,
+ * so we should move the device before conn device is destroyed.
+ */
static int __match_tty(struct device *dev, void *data)
{
- /* The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- * Due to the only child device of hci_conn dev is rfcomm
- * tty_dev, here just return 1
- */
- return 1;
+ return !strncmp(dev->bus_id, "rfcomm", 6);
}
static void del_conn(struct work_struct *work)
{
- struct device *dev;
struct hci_conn *conn = container_of(work, struct hci_conn, work);
+ struct hci_dev *hdev = conn->hdev;
+
+ while (1) {
+ struct device *dev;
- while (dev = device_find_child(&conn->dev, NULL, __match_tty)) {
+ dev = device_find_child(&conn->dev, NULL, __match_tty);
+ if (!dev)
+ break;
device_move(dev, NULL);
put_device(dev);
}
+
device_del(&conn->dev);
put_device(&conn->dev);
+ hci_dev_put(hdev);
}
void hci_conn_del_sysfs(struct hci_conn *conn)
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index e700cbf634c2..ca64c1cc1b47 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -20,8 +20,8 @@ static int ebt_target_dnat(struct sk_buff *skb, unsigned int hooknr,
{
const struct ebt_nat_info *info = data;
- if (skb_make_writable(skb, 0))
- return NF_DROP;
+ if (!skb_make_writable(skb, 0))
+ return EBT_DROP;
memcpy(eth_hdr(skb)->h_dest, info->mac, ETH_ALEN);
return info->target;
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index bfdf2fb60b1f..b8afe850cf1e 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -21,8 +21,8 @@ static int ebt_target_redirect(struct sk_buff *skb, unsigned int hooknr,
{
const struct ebt_redirect_info *info = data;
- if (skb_make_writable(skb, 0))
- return NF_DROP;
+ if (!skb_make_writable(skb, 0))
+ return EBT_DROP;
if (hooknr != NF_BR_BROUTING)
memcpy(eth_hdr(skb)->h_dest,
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index e252dabbb143..5425333dda03 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -22,8 +22,8 @@ static int ebt_target_snat(struct sk_buff *skb, unsigned int hooknr,
{
const struct ebt_nat_info *info = data;
- if (skb_make_writable(skb, 0))
- return NF_DROP;
+ if (!skb_make_writable(skb, 0))
+ return EBT_DROP;
memcpy(eth_hdr(skb)->h_source, info->mac, ETH_ALEN);
if (!(info->target & NAT_ARP_BIT) &&
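
All three ebtables NAT targets above had the test inverted: skb_make_writable() returns non-zero on success, so the failure branch is the negated call, and an ebtables target must return an EBT_* verdict (EBT_DROP) rather than the netfilter NF_DROP value. The same inversion is fixed in arpt_mangle.c below, which correctly keeps NF_DROP since it is an arptables target. A reduced sketch of the corrected shape, with a simplified, made-up target signature for illustration:

        #include <linux/skbuff.h>
        #include <linux/if_ether.h>
        #include <linux/netfilter.h>
        #include <linux/netfilter_bridge/ebtables.h>
        #include <linux/netfilter_bridge/ebt_nat.h>

        static int my_ebt_target(struct sk_buff *skb, const void *data)
        {
                const struct ebt_nat_info *info = data;

                /* fail with an ebtables verdict if the header cannot be made writable */
                if (!skb_make_writable(skb, 0))
                        return EBT_DROP;

                memcpy(eth_hdr(skb)->h_dest, info->mac, ETH_ALEN);
                return info->target;
        }
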
diff --git a/net/core/dev.c b/net/core/dev.c
index b3e19ae57f95..fcdf03cf3b3f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1071,8 +1071,6 @@ int dev_close(struct net_device *dev)
*/
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
- dev_deactivate(dev);
-
clear_bit(__LINK_STATE_START, &dev->state);
/* Synchronize to scheduled poll. We cannot touch poll list,
@@ -1083,6 +1081,8 @@ int dev_close(struct net_device *dev)
*/
smp_mb__after_clear_bit(); /* Commit netif_running(). */
+ dev_deactivate(dev);
+
/*
* Call the device specific close. This cannot fail.
* Only if device is UP
@@ -2900,7 +2900,7 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
}
}
- da = kmalloc(sizeof(*da), GFP_ATOMIC);
+ da = kzalloc(sizeof(*da), GFP_ATOMIC);
if (da == NULL)
return -ENOMEM;
memcpy(da->da_addr, addr, alen);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a16cf1ec5e5e..2328acbd16cd 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -358,11 +358,12 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
{
struct neighbour *n;
int key_len = tbl->key_len;
- u32 hash_val = tbl->hash(pkey, dev);
+ u32 hash_val;
NEIGH_CACHE_STAT_INC(tbl, lookups);
read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, dev);
for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
neigh_hold(n);
@@ -379,11 +380,12 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
{
struct neighbour *n;
int key_len = tbl->key_len;
- u32 hash_val = tbl->hash(pkey, NULL);
+ u32 hash_val;
NEIGH_CACHE_STAT_INC(tbl, lookups);
read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, NULL);
for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
if (!memcmp(n->primary_key, pkey, key_len) &&
(net == n->dev->nd_net)) {
@@ -507,6 +509,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
if (tbl->pconstructor && tbl->pconstructor(n)) {
if (dev)
dev_put(dev);
+ release_net(net);
kfree(n);
n = NULL;
goto out;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 61ac8d06292c..2bd9c5f7627d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -689,10 +689,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
[IFLA_MTU] = { .type = NLA_U32 },
+ [IFLA_LINK] = { .type = NLA_U32 },
[IFLA_TXQLEN] = { .type = NLA_U32 },
[IFLA_WEIGHT] = { .type = NLA_U32 },
[IFLA_OPERSTATE] = { .type = NLA_U8 },
[IFLA_LINKMODE] = { .type = NLA_U8 },
+ [IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
};
@@ -720,6 +722,21 @@ static struct net *get_net_ns_by_pid(pid_t pid)
return net;
}
+static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+{
+ if (dev) {
+ if (tb[IFLA_ADDRESS] &&
+ nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
+ return -EINVAL;
+
+ if (tb[IFLA_BROADCAST] &&
+ nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
@@ -892,12 +909,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
goto errout;
}
- if (tb[IFLA_ADDRESS] &&
- nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
- goto errout_dev;
-
- if (tb[IFLA_BROADCAST] &&
- nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
+ if ((err = validate_linkmsg(dev, tb)) < 0)
goto errout_dev;
err = do_setlink(dev, ifm, tb, ifname, 0);
@@ -1018,6 +1030,9 @@ replay:
else
dev = NULL;
+ if ((err = validate_linkmsg(dev, tb)) < 0)
+ return err;
+
if (tb[IFLA_LINKINFO]) {
err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
tb[IFLA_LINKINFO], ifla_info_policy);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cfc07dac636c..0d0fd28a9041 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2106,11 +2106,10 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
- * @start: start of data before pull
* @len: length of data pulled
*
* This function performs an skb_pull on the packet and updates
- * update the CHECKSUM_COMPLETE checksum. It should be used on
+ * the CHECKSUM_COMPLETE checksum. It should be used on
* receive path processing instead of skb_pull unless you know
* that the checksum difference is zero (e.g., a valid IP header)
* or you are setting ip_summed to CHECKSUM_NONE.
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 9d4555ec0b59..8219b7e0968d 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -96,7 +96,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->reserved = 0;
ah->spi = x->id.spi;
- ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
+ ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
spin_lock_bh(&x->lock);
err = ah_mac_digest(ahp, skb, ah->auth_data);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 258d17631b4b..091e6709f831 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -199,7 +199,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
}
esph->spi = x->id.spi;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,
@@ -210,7 +210,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
- aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
+ aead_givcrypt_set_giv(req, esph->enc_data,
+ XFRM_SKB_CB(skb)->seq.output);
ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 76b9c684cccd..8d58d85dfac6 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -372,7 +372,8 @@ static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
{
struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fib_node *new_f, *f;
+ struct fib_node *new_f = NULL;
+ struct fib_node *f;
struct fib_alias *fa, *new_fa;
struct fn_zone *fz;
struct fib_info *fi;
@@ -496,7 +497,6 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
err = -ENOBUFS;
- new_f = NULL;
if (!f) {
new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
if (new_f == NULL)
@@ -512,7 +512,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
if (new_fa->fa_info != NULL) {
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (new_fa == NULL)
- goto out_free_new_f;
+ goto out;
}
new_fa->fa_info = fi;
new_fa->fa_tos = tos;
@@ -540,9 +540,9 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
&cfg->fc_nlinfo, 0);
return 0;
-out_free_new_f:
- kmem_cache_free(fn_hash_kmem, new_f);
out:
+ if (new_f)
+ kmem_cache_free(fn_hash_kmem, new_f);
fib_release_info(fi);
return err;
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index f5fba3f71c06..1ff446d0fa8b 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1762,11 +1762,9 @@ static struct leaf *trie_leafindex(struct trie *t, int index)
{
struct leaf *l = trie_firstleaf(t);
- while (index-- > 0) {
+ while (l && index-- > 0)
l = trie_nextleaf(l);
- if (!l)
- break;
- }
+
return l;
}
@@ -2461,6 +2459,84 @@ static const struct file_operations fib_trie_fops = {
.release = seq_release_net,
};
+struct fib_route_iter {
+ struct seq_net_private p;
+ struct trie *main_trie;
+ loff_t pos;
+ t_key key;
+};
+
+static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
+{
+ struct leaf *l = NULL;
+ struct trie *t = iter->main_trie;
+
+ /* use cache location of last found key */
+ if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
+ pos -= iter->pos;
+ else {
+ iter->pos = 0;
+ l = trie_firstleaf(t);
+ }
+
+ while (l && pos-- > 0) {
+ iter->pos++;
+ l = trie_nextleaf(l);
+ }
+
+ if (l)
+ iter->key = pos; /* remember it */
+ else
+ iter->pos = 0; /* forget it */
+
+ return l;
+}
+
+static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
+{
+ struct fib_route_iter *iter = seq->private;
+ struct fib_table *tb;
+
+ rcu_read_lock();
+ tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
+ if (!tb)
+ return NULL;
+
+ iter->main_trie = (struct trie *) tb->tb_data;
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+ else
+ return fib_route_get_idx(iter, *pos - 1);
+}
+
+static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct fib_route_iter *iter = seq->private;
+ struct leaf *l = v;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN) {
+ iter->pos = 0;
+ l = trie_firstleaf(iter->main_trie);
+ } else {
+ iter->pos++;
+ l = trie_nextleaf(l);
+ }
+
+ if (l)
+ iter->key = l->key;
+ else
+ iter->pos = 0;
+ return l;
+}
+
+static void fib_route_seq_stop(struct seq_file *seq, void *v)
+ __releases(RCU)
+{
+ rcu_read_unlock();
+}
+
static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
static unsigned type2flags[RTN_MAX + 1] = {
@@ -2484,7 +2560,6 @@ static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
*/
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
- const struct fib_trie_iter *iter = seq->private;
struct leaf *l = v;
struct leaf_info *li;
struct hlist_node *node;
@@ -2496,12 +2571,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
return 0;
}
- if (iter->trie == iter->trie_local)
- return 0;
-
- if (IS_TNODE(l))
- return 0;
-
hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
struct fib_alias *fa;
__be32 mask, prefix;
@@ -2544,16 +2613,16 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
}
static const struct seq_operations fib_route_seq_ops = {
- .start = fib_trie_seq_start,
- .next = fib_trie_seq_next,
- .stop = fib_trie_seq_stop,
+ .start = fib_route_seq_start,
+ .next = fib_route_seq_next,
+ .stop = fib_route_seq_stop,
.show = fib_route_seq_show,
};
static int fib_route_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &fib_route_seq_ops,
- sizeof(struct fib_trie_iter));
+ sizeof(struct fib_route_iter));
}
static const struct file_operations fib_route_fops = {
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 9cac6c034abd..1aba606f6bbb 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -120,8 +120,6 @@ void inet_listen_wlock(struct inet_hashinfo *hashinfo)
}
}
-EXPORT_SYMBOL(inet_listen_wlock);
-
/*
* Don't inline this cruft. Here are some nice properties to exploit here. The
* BSD API does not allow a listening sock to specify the remote port nor the
@@ -494,7 +492,6 @@ out:
return ret;
}
}
-EXPORT_SYMBOL_GPL(__inet_hash_connect);
/*
* Bind a port for a connect operation and hash it.
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 63f691719353..906cb1ada4c3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -259,16 +259,8 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
- else {
- int i;
- for (i=1; i<100; i++) {
- sprintf(name, "gre%d", i);
- if (__dev_get_by_name(&init_net, name) == NULL)
- break;
- }
- if (i==100)
- goto failed;
- }
+ else
+ sprintf(name, "gre%%d");
dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
if (!dev)
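
This change, repeated for ipip.c, ip6_tunnel.c and sit.c below, drops the hand-rolled probe for a free interface name in favour of handing alloc_netdev() a printf-style template; register_netdevice() expands a %d in the name to the first free index via dev_alloc_name(). The doubled %% in the patch exists only because the template string is itself produced with sprintf(). A hedged sketch of the simpler form, with a hypothetical setup callback:

        #include <linux/netdevice.h>
        #include <linux/string.h>

        static void my_tunnel_setup(struct net_device *dev)
        {
                /* fill in dev->type, dev->mtu, hard_start_xmit, ... */
        }

        static struct net_device *my_tunnel_create(const char *requested)
        {
                char name[IFNAMSIZ];

                if (requested && requested[0])
                        strlcpy(name, requested, IFNAMSIZ);
                else
                        strcpy(name, "gre%d"); /* "%d" is expanded at register time */

                return alloc_netdev(0, name, my_tunnel_setup);
        }
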
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 754b0a5bbfe9..de0572c88859 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -514,11 +514,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
val &= ~3;
val |= inet->tos & 3;
}
- if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
- !capable(CAP_NET_ADMIN)) {
- err = -EPERM;
- break;
- }
if (inet->tos != val) {
inet->tos = val;
sk->sk_priority = rt_tos2priority(val);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index a52b5853aaa8..10013ccee8dd 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1390,7 +1390,7 @@ static int __init ip_auto_config(void)
* Clue in the operator.
*/
printk("IP-Config: Complete:");
- printk("\n device=%s", ic_dev->name);
+ printk("\n device=%s", ic_dev->name);
printk(", addr=%u.%u.%u.%u", NIPQUAD(ic_myaddr));
printk(", mask=%u.%u.%u.%u", NIPQUAD(ic_netmask));
printk(", gw=%u.%u.%u.%u", NIPQUAD(ic_gateway));
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index da281581692c..e77e3b855834 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -221,16 +221,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
- else {
- int i;
- for (i=1; i<100; i++) {
- sprintf(name, "tunl%d", i);
- if (__dev_get_by_name(&init_net, name) == NULL)
- break;
- }
- if (i==100)
- goto failed;
- }
+ else
+ sprintf(name, "tunl%%d");
dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
if (dev == NULL)
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index 45fa4e20094a..3f4222b0a803 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -19,7 +19,7 @@ target(struct sk_buff *skb,
unsigned char *arpptr;
int pln, hln;
- if (skb_make_writable(skb, skb->len))
+ if (!skb_make_writable(skb, skb->len))
return NF_DROP;
arp = arp_hdr(skb);
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 6bda1102851b..fe05da41d6ba 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -283,8 +283,8 @@ static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
int diff;
- int err;
struct iphdr *user_iph = (struct iphdr *)v->payload;
+ struct sk_buff *nskb;
if (v->data_len < sizeof(*user_iph))
return 0;
@@ -296,14 +296,16 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
if (v->data_len > 0xFFFF)
return -EINVAL;
if (diff > skb_tailroom(e->skb)) {
- err = pskb_expand_head(e->skb, 0,
+ nskb = skb_copy_expand(e->skb, 0,
diff - skb_tailroom(e->skb),
GFP_ATOMIC);
- if (err) {
+ if (!nskb) {
printk(KERN_WARNING "ip_queue: error "
- "in mangle, dropping packet: %d\n", -err);
- return err;
+ "in mangle, dropping packet\n");
+ return -ENOMEM;
}
+ kfree_skb(e->skb);
+ e->skb = nskb;
}
skb_put(e->skb, diff);
}
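
When a userspace verdict enlarges a queued packet beyond its tailroom, the mangle helper above (and its ip6_queue and nfnetlink_queue counterparts below) now builds a grown copy with skb_copy_expand() and swaps it into the queue entry, freeing the original, instead of reallocating the head in place with pskb_expand_head(). A minimal sketch of the copy-and-swap idiom, with a hypothetical helper name:

        #include <linux/skbuff.h>
        #include <linux/errno.h>

        /* Grow a queued skb so it can take extra bytes of mangled payload.
         * On success the caller's pointer refers to a new, larger skb and the
         * old one has been freed. */
        static int grow_queued_skb(struct sk_buff **pskb, int extra)
        {
                struct sk_buff *nskb;

                if (extra <= skb_tailroom(*pskb))
                        return 0;       /* enough room already */

                nskb = skb_copy_expand(*pskb, 0, extra - skb_tailroom(*pskb),
                                       GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                kfree_skb(*pskb);
                *pskb = nskb;
                return 0;
        }
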
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 63414ea427c5..00156bf421ca 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -719,7 +719,7 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
}
/*
- * Send a SYN-ACK after having received an ACK.
+ * Send a SYN-ACK after having received a SYN.
* This still operates on a request_sock only, not on a big
* socket.
*/
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index bddac0e8780f..f0aa97738746 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -752,14 +752,6 @@ static int __init inet6_init(void)
BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
-#ifdef MODULE
-#if 0 /* FIXME --RR */
- if (!mod_member_present(&__this_module, can_unload))
- return -EINVAL;
-
- __this_module.can_unload = &ipv6_unload;
-#endif
-#endif
err = proto_register(&tcpv6_prot, 1);
if (err)
goto out;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 379c8e04c36c..2ff0c8233e47 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -283,7 +283,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ah->reserved = 0;
ah->spi = x->id.spi;
- ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
+ ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
spin_lock_bh(&x->lock);
err = ah_mac_digest(ahp, skb, ah->auth_data);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 8e0f1428c716..0ec1402320ea 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -188,7 +188,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
*skb_mac_header(skb) = IPPROTO_ESP;
esph->spi = x->id.spi;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,
@@ -199,7 +199,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
- aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
+ aead_givcrypt_set_giv(req, esph->enc_data,
+ XFRM_SKB_CB(skb)->seq.output);
ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index f93407cf6515..bab72b6f1444 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1151,7 +1151,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fn = fn->parent;
}
/* No more references are possible at this point. */
- if (atomic_read(&rt->rt6i_ref) != 1) BUG();
+ BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
}
inet6_rt_notify(RTM_DELROUTE, rt, info);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9ac6ca2521c3..8b67ca07467d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -621,7 +621,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
* or if the skb it not generated by a local socket. (This last
* check should be redundant, but it's free.)
*/
- if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) {
+ if (!skb->local_df) {
skb->dev = skb->dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
@@ -1420,6 +1420,10 @@ int ip6_push_pending_frames(struct sock *sk)
tmp_skb->sk = NULL;
}
+ /* Allow local fragmentation. */
+ if (np->pmtudisc < IPV6_PMTUDISC_DO)
+ skb->local_df = 1;
+
ipv6_addr_copy(final_dst, &fl->fl6_dst);
__skb_pull(skb, skb_network_header_len(skb));
if (opt && opt->opt_flen)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9031e521c1df..2a124e9a1b2d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -229,18 +229,11 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
char name[IFNAMSIZ];
int err;
- if (p->name[0]) {
+ if (p->name[0])
strlcpy(name, p->name, IFNAMSIZ);
- } else {
- int i;
- for (i = 1; i < IP6_TNL_MAX; i++) {
- sprintf(name, "ip6tnl%d", i);
- if (__dev_get_by_name(&init_net, name) == NULL)
- break;
- }
- if (i == IP6_TNL_MAX)
- goto failed;
- }
+ else
+ sprintf(name, "ip6tnl%%d");
+
dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
if (dev == NULL)
goto failed;
@@ -550,6 +543,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
ip_rt_put(rt);
goto out;
}
+ skb2->dst = (struct dst_entry *)rt;
} else {
ip_rt_put(rt);
if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index e869916b05f1..cc2f9afcf808 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -285,8 +285,8 @@ static int
ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
int diff;
- int err;
struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload;
+ struct sk_buff *nskb;
if (v->data_len < sizeof(*user_iph))
return 0;
@@ -298,14 +298,16 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
if (v->data_len > 0xFFFF)
return -EINVAL;
if (diff > skb_tailroom(e->skb)) {
- err = pskb_expand_head(e->skb, 0,
+ nskb = skb_copy_expand(e->skb, 0,
diff - skb_tailroom(e->skb),
GFP_ATOMIC);
- if (err) {
+ if (!nskb) {
printk(KERN_WARNING "ip6_queue: OOM "
"in mangle, dropping packet\n");
- return err;
+ return -ENOMEM;
}
+ kfree_skb(e->skb);
+ e->skb = nskb;
}
skb_put(e->skb, diff);
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e77239d02bf5..dde7801abeff 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -164,16 +164,8 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
- else {
- int i;
- for (i=1; i<100; i++) {
- sprintf(name, "sit%d", i);
- if (__dev_get_by_name(&init_net, name) == NULL)
- break;
- }
- if (i==100)
- goto failed;
- }
+ else
+ sprintf(name, "sit%%d");
dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
if (dev == NULL)
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index b34c58c65656..79ccfb080733 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -36,7 +36,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- if (skb->len > mtu) {
+ if (!skb->local_df && skb->len > mtu) {
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
ret = -EMSGSIZE;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index b3ac85e808ac..1c853927810a 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2291,6 +2291,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
return 0;
out:
+ xp->dead = 1;
xfrm_policy_destroy(xp);
return err;
}
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 67b7c75c430d..28bcdf9fc3df 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -165,6 +165,7 @@ static int ieee80211_open(struct net_device *dev)
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_if_init_conf conf;
int res;
+ bool need_hw_reconfig = 0;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -218,7 +219,7 @@ static int ieee80211_open(struct net_device *dev)
res = local->ops->start(local_to_hw(local));
if (res)
return res;
- ieee80211_hw_config(local);
+ need_hw_reconfig = 1;
ieee80211_led_radio(local, local->hw.conf.radio_enabled);
}
@@ -282,6 +283,8 @@ static int ieee80211_open(struct net_device *dev)
atomic_inc(&local->iff_promiscs);
local->open_count++;
+ if (need_hw_reconfig)
+ ieee80211_hw_config(local);
netif_start_queue(dev);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 202d7fa09483..62567959b66e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -945,7 +945,7 @@ static int tcp_packet(struct nf_conn *ct,
ct->proto.tcp.state = new_state;
if (old_state != new_state
- && new_state == TCP_CONNTRACK_CLOSE)
+ && new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
timeout = ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans
&& tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a48b20fe9cd6..0043d3a9f87e 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,8 +443,8 @@ err_out:
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
+ struct sk_buff *nskb;
int diff;
- int err;
diff = data_len - e->skb->len;
if (diff < 0) {
@@ -454,14 +454,16 @@ nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
if (data_len > 0xFFFF)
return -EINVAL;
if (diff > skb_tailroom(e->skb)) {
- err = pskb_expand_head(e->skb, 0,
+ nskb = skb_copy_expand(e->skb, 0,
diff - skb_tailroom(e->skb),
GFP_ATOMIC);
- if (err) {
+ if (!nskb) {
printk(KERN_WARNING "nf_queue: OOM "
"in mangle, dropping packet\n");
- return err;
+ return -ENOMEM;
}
+ kfree_skb(e->skb);
+ e->skb = nskb;
}
skb_put(e->skb, diff);
}
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 7708e2084ce2..c0284856ccd4 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -111,7 +111,7 @@ secmark_tg_check(const char *tablename, const void *entry,
return true;
}
-void secmark_tg_destroy(const struct xt_target *target, void *targinfo)
+static void secmark_tg_destroy(const struct xt_target *target, void *targinfo)
{
switch (mode) {
case SECMARK_MODE_SEL:
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 744c7f2ab0b1..5418ce59ac3a 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -774,9 +774,6 @@ hashlimit_mt_check(const char *tablename, const void *inf,
return false;
}
mutex_unlock(&hlimit_mutex);
-
- /* Ugly hack: For SMP, we only want to use one set */
- info->master = info;
return true;
}
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 4f984dc60319..500528d60cd7 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -102,7 +102,7 @@ iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
int r;
for (i = 0; i < 4; ++i) {
- r = (__force u32)a->s6_addr32[i] - (__force u32)b->s6_addr32[i];
+ r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]);
if (r != 0)
return r;
}
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index 9b8ed390a8e0..627e0f336d54 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -26,7 +26,6 @@ static bool u32_match_it(const struct xt_u32 *data,
u_int32_t pos;
u_int32_t val;
u_int32_t at;
- int ret;
/*
* Small example: "0 >> 28 == 4 && 8 & 0xFF0000 >> 16 = 6, 17"
@@ -40,8 +39,8 @@ static bool u32_match_it(const struct xt_u32 *data,
if (skb->len < 4 || pos > skb->len - 4)
return false;
- ret = skb_copy_bits(skb, pos, &n, sizeof(n));
- BUG_ON(ret < 0);
+ if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0)
+ BUG();
val = ntohl(n);
nnums = ct->nnums;
@@ -67,9 +66,9 @@ static bool u32_match_it(const struct xt_u32 *data,
pos > skb->len - at - 4)
return false;
- ret = skb_copy_bits(skb, at + pos, &n,
- sizeof(n));
- BUG_ON(ret < 0);
+ if (skb_copy_bits(skb, at + pos, &n,
+ sizeof(n)) < 0)
+ BUG();
val = ntohl(n);
break;
}
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index c7ad64d664ad..fdc14a0d21af 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -718,36 +718,35 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
* NetLabel Generic NETLINK Command Definitions
*/
-static struct genl_ops netlbl_cipsov4_genl_c_add = {
+static struct genl_ops netlbl_cipsov4_ops[] = {
+ {
.cmd = NLBL_CIPSOV4_C_ADD,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_cipsov4_genl_policy,
.doit = netlbl_cipsov4_add,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_cipsov4_genl_c_remove = {
+ },
+ {
.cmd = NLBL_CIPSOV4_C_REMOVE,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_cipsov4_genl_policy,
.doit = netlbl_cipsov4_remove,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_cipsov4_genl_c_list = {
+ },
+ {
.cmd = NLBL_CIPSOV4_C_LIST,
.flags = 0,
.policy = netlbl_cipsov4_genl_policy,
.doit = netlbl_cipsov4_list,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_cipsov4_genl_c_listall = {
+ },
+ {
.cmd = NLBL_CIPSOV4_C_LISTALL,
.flags = 0,
.policy = netlbl_cipsov4_genl_policy,
.doit = NULL,
.dumpit = netlbl_cipsov4_listall,
+ },
};
/*
@@ -762,30 +761,20 @@ static struct genl_ops netlbl_cipsov4_genl_c_listall = {
* mechanism. Returns zero on success, negative values on failure.
*
*/
-int netlbl_cipsov4_genl_init(void)
+int __init netlbl_cipsov4_genl_init(void)
{
- int ret_val;
+ int ret_val, i;
ret_val = genl_register_family(&netlbl_cipsov4_gnl_family);
if (ret_val != 0)
return ret_val;
- ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
- &netlbl_cipsov4_genl_c_add);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
- &netlbl_cipsov4_genl_c_remove);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
- &netlbl_cipsov4_genl_c_list);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
- &netlbl_cipsov4_genl_c_listall);
- if (ret_val != 0)
- return ret_val;
+ for (i = 0; i < ARRAY_SIZE(netlbl_cipsov4_ops); i++) {
+ ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family,
+ &netlbl_cipsov4_ops[i]);
+ if (ret_val != 0)
+ return ret_val;
+ }
return 0;
}
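
The three NetLabel components in this series (netlabel_mgmt.c and netlabel_unlabeled.c follow below) all collapse their per-command genl_ops definitions into one static array registered in a loop, which keeps the command table in a single place and lets ARRAY_SIZE() drive registration; the init functions also gain __init since they only run at boot. A reduced sketch of the pattern, with placeholder family, commands and handlers:

        #include <linux/kernel.h>
        #include <net/genetlink.h>

        static struct genl_family my_genl_family = {
                .id      = GENL_ID_GENERATE,
                .name    = "my_family",
                .version = 1,
        };

        static int my_cmd_add(struct sk_buff *skb, struct genl_info *info);
        static int my_cmd_list(struct sk_buff *skb, struct netlink_callback *cb);

        static struct genl_ops my_genl_ops[] = {
                {
                        .cmd    = 1,            /* MY_CMD_ADD, placeholder */
                        .flags  = GENL_ADMIN_PERM,
                        .doit   = my_cmd_add,
                },
                {
                        .cmd    = 2,            /* MY_CMD_LIST, placeholder */
                        .dumpit = my_cmd_list,
                },
        };

        static int __init my_genl_init(void)
        {
                int ret, i;

                ret = genl_register_family(&my_genl_family);
                if (ret != 0)
                        return ret;

                /* register every op in the table, stopping on first failure */
                for (i = 0; i < ARRAY_SIZE(my_genl_ops); i++) {
                        ret = genl_register_ops(&my_genl_family, &my_genl_ops[i]);
                        if (ret != 0)
                                return ret;
                }
                return 0;
        }
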
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 9a8ea0195c4f..02c2f7c0b255 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -150,11 +150,11 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
entry = netlbl_domhsh_search(domain);
if (entry == NULL) {
entry = rcu_dereference(netlbl_domhsh_def);
- if (entry != NULL && entry->valid)
- return entry;
+ if (entry != NULL && !entry->valid)
+ entry = NULL;
}
- return NULL;
+ return entry;
}
/*
@@ -171,7 +171,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
* values on error.
*
*/
-int netlbl_domhsh_init(u32 size)
+int __init netlbl_domhsh_init(u32 size)
{
u32 iter;
struct netlbl_domhsh_tbl *hsh_tbl;
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index e2258dc3c845..22c191267808 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -517,68 +517,63 @@ version_failure:
* NetLabel Generic NETLINK Command Definitions
*/
-static struct genl_ops netlbl_mgmt_genl_c_add = {
+static struct genl_ops netlbl_mgmt_genl_ops[] = {
+ {
.cmd = NLBL_MGMT_C_ADD,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_add,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_mgmt_genl_c_remove = {
+ },
+ {
.cmd = NLBL_MGMT_C_REMOVE,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_remove,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_mgmt_genl_c_listall = {
+ },
+ {
.cmd = NLBL_MGMT_C_LISTALL,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = NULL,
.dumpit = netlbl_mgmt_listall,
-};
-
-static struct genl_ops netlbl_mgmt_genl_c_adddef = {
+ },
+ {
.cmd = NLBL_MGMT_C_ADDDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_adddef,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_mgmt_genl_c_removedef = {
+ },
+ {
.cmd = NLBL_MGMT_C_REMOVEDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_removedef,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_mgmt_genl_c_listdef = {
+ },
+ {
.cmd = NLBL_MGMT_C_LISTDEF,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_listdef,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_mgmt_genl_c_protocols = {
+ },
+ {
.cmd = NLBL_MGMT_C_PROTOCOLS,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = NULL,
.dumpit = netlbl_mgmt_protocols,
-};
-
-static struct genl_ops netlbl_mgmt_genl_c_version = {
+ },
+ {
.cmd = NLBL_MGMT_C_VERSION,
.flags = 0,
.policy = netlbl_mgmt_genl_policy,
.doit = netlbl_mgmt_version,
.dumpit = NULL,
+ },
};
/*
@@ -593,46 +588,20 @@ static struct genl_ops netlbl_mgmt_genl_c_version = {
* mechanism. Returns zero on success, negative values on failure.
*
*/
-int netlbl_mgmt_genl_init(void)
+int __init netlbl_mgmt_genl_init(void)
{
- int ret_val;
+ int ret_val, i;
ret_val = genl_register_family(&netlbl_mgmt_gnl_family);
if (ret_val != 0)
return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_add);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_remove);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_listall);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_adddef);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_removedef);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_listdef);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_protocols);
- if (ret_val != 0)
- return ret_val;
- ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
- &netlbl_mgmt_genl_c_version);
- if (ret_val != 0)
- return ret_val;
+ for (i = 0; i < ARRAY_SIZE(netlbl_mgmt_genl_ops); i++) {
+ ret_val = genl_register_ops(&netlbl_mgmt_gnl_family,
+ &netlbl_mgmt_genl_ops[i]);
+ if (ret_val != 0)
+ return ret_val;
+ }
return 0;
}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 42e81fd8cc49..4478f2f6079d 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -180,6 +180,7 @@ static void netlbl_unlabel_audit_addr4(struct audit_buffer *audit_buf,
}
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/**
* netlbl_unlabel_audit_addr6 - Audit an IPv6 address
* @audit_buf: audit buffer
@@ -213,6 +214,7 @@ static void netlbl_unlabel_audit_addr6(struct audit_buffer *audit_buf,
audit_log_format(audit_buf, " src_prefixlen=%d", mask_len);
}
}
+#endif /* IPv6 */
/*
* Unlabeled Connection Hash Table Functions
@@ -617,8 +619,6 @@ static int netlbl_unlhsh_add(struct net *net,
int ifindex;
struct net_device *dev;
struct netlbl_unlhsh_iface *iface;
- struct in_addr *addr4, *mask4;
- struct in6_addr *addr6, *mask6;
struct audit_buffer *audit_buf = NULL;
char *secctx = NULL;
u32 secctx_len;
@@ -651,7 +651,9 @@ static int netlbl_unlhsh_add(struct net *net,
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCADD,
audit_info);
switch (addr_len) {
- case sizeof(struct in_addr):
+ case sizeof(struct in_addr): {
+ struct in_addr *addr4, *mask4;
+
addr4 = (struct in_addr *)addr;
mask4 = (struct in_addr *)mask;
ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid);
@@ -661,8 +663,11 @@ static int netlbl_unlhsh_add(struct net *net,
addr4->s_addr,
mask4->s_addr);
break;
+ }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case sizeof(struct in6_addr):
+ case sizeof(struct in6_addr): {
+ struct in6_addr *addr6, *mask6;
+
addr6 = (struct in6_addr *)addr;
mask6 = (struct in6_addr *)mask;
ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid);
@@ -671,6 +676,7 @@ static int netlbl_unlhsh_add(struct net *net,
dev_name,
addr6, mask6);
break;
+ }
#endif /* IPv6 */
default:
ret_val = -EINVAL;
@@ -1547,68 +1553,63 @@ unlabel_staticlistdef_return:
* NetLabel Generic NETLINK Command Definitions
*/
-static struct genl_ops netlbl_unlabel_genl_c_staticadd = {
+static struct genl_ops netlbl_unlabel_genl_ops[] = {
+ {
.cmd = NLBL_UNLABEL_C_STATICADD,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticadd,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_unlabel_genl_c_staticremove = {
+ },
+ {
.cmd = NLBL_UNLABEL_C_STATICREMOVE,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticremove,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_unlabel_genl_c_staticlist = {
+ },
+ {
.cmd = NLBL_UNLABEL_C_STATICLIST,
.flags = 0,
.policy = netlbl_unlabel_genl_policy,
.doit = NULL,
.dumpit = netlbl_unlabel_staticlist,
-};
-
-static struct genl_ops netlbl_unlabel_genl_c_staticadddef = {
+ },
+ {
.cmd = NLBL_UNLABEL_C_STATICADDDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticadddef,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_unlabel_genl_c_staticremovedef = {
+ },
+ {
.cmd = NLBL_UNLABEL_C_STATICREMOVEDEF,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_staticremovedef,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_unlabel_genl_c_staticlistdef = {
+ },
+ {
.cmd = NLBL_UNLABEL_C_STATICLISTDEF,
.flags = 0,
.policy = netlbl_unlabel_genl_policy,
.doit = NULL,
.dumpit = netlbl_unlabel_staticlistdef,
-};
-
-static struct genl_ops netlbl_unlabel_genl_c_accept = {
+ },
+ {
.cmd = NLBL_UNLABEL_C_ACCEPT,
.flags = GENL_ADMIN_PERM,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_accept,
.dumpit = NULL,
-};
-
-static struct genl_ops netlbl_unlabel_genl_c_list = {
+ },
+ {
.cmd = NLBL_UNLABEL_C_LIST,
.flags = 0,
.policy = netlbl_unlabel_genl_policy,
.doit = netlbl_unlabel_list,
.dumpit = NULL,
+ },
};
/*
@@ -1623,53 +1624,20 @@ static struct genl_ops netlbl_unlabel_genl_c_list = {
* mechanism. Returns zero on success, negative values on failure.
*
*/
-int netlbl_unlabel_genl_init(void)
+int __init netlbl_unlabel_genl_init(void)
{
- int ret_val;
+ int ret_val, i;
ret_val = genl_register_family(&netlbl_unlabel_gnl_family);
if (ret_val != 0)
return ret_val;
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_staticadd);
- if (ret_val != 0)
- return ret_val;
-
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_staticremove);
- if (ret_val != 0)
- return ret_val;
-
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_staticlist);
- if (ret_val != 0)
- return ret_val;
-
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_staticadddef);
- if (ret_val != 0)
- return ret_val;
-
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_staticremovedef);
- if (ret_val != 0)
- return ret_val;
-
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_staticlistdef);
- if (ret_val != 0)
- return ret_val;
-
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_accept);
- if (ret_val != 0)
- return ret_val;
-
- ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
- &netlbl_unlabel_genl_c_list);
- if (ret_val != 0)
- return ret_val;
+ for (i = 0; i < ARRAY_SIZE(netlbl_unlabel_genl_ops); i++) {
+ ret_val = genl_register_ops(&netlbl_unlabel_gnl_family,
+ &netlbl_unlabel_genl_ops[i]);
+ if (ret_val != 0)
+ return ret_val;
+ }
return 0;
}
@@ -1693,7 +1661,7 @@ static struct notifier_block netlbl_unlhsh_netdev_notifier = {
* non-zero values on error.
*
*/
-int netlbl_unlabel_init(u32 size)
+int __init netlbl_unlabel_init(u32 size)
{
u32 iter;
struct netlbl_unlhsh_tbl *hsh_tbl;
@@ -1741,10 +1709,6 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
u16 family,
struct netlbl_lsm_secattr *secattr)
{
- struct iphdr *hdr4;
- struct ipv6hdr *hdr6;
- struct netlbl_unlhsh_addr4 *addr4;
- struct netlbl_unlhsh_addr6 *addr6;
struct netlbl_unlhsh_iface *iface;
rcu_read_lock();
@@ -1752,21 +1716,29 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
if (iface == NULL)
goto unlabel_getattr_nolabel;
switch (family) {
- case PF_INET:
+ case PF_INET: {
+ struct iphdr *hdr4;
+ struct netlbl_unlhsh_addr4 *addr4;
+
hdr4 = ip_hdr(skb);
addr4 = netlbl_unlhsh_search_addr4(hdr4->saddr, iface);
if (addr4 == NULL)
goto unlabel_getattr_nolabel;
secattr->attr.secid = addr4->secid;
break;
+ }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case PF_INET6:
+ case PF_INET6: {
+ struct ipv6hdr *hdr6;
+ struct netlbl_unlhsh_addr6 *addr6;
+
hdr6 = ipv6_hdr(skb);
addr6 = netlbl_unlhsh_search_addr6(&hdr6->saddr, iface);
if (addr6 == NULL)
goto unlabel_getattr_nolabel;
secattr->attr.secid = addr6->secid;
break;
+ }
#endif /* IPv6 */
default:
goto unlabel_getattr_nolabel;
@@ -1793,7 +1765,7 @@ unlabel_getattr_nolabel:
* and to send unlabeled network traffic by default.
*
*/
-int netlbl_unlabel_defconf(void)
+int __init netlbl_unlabel_defconf(void)
{
int ret_val;
struct netlbl_dom_map *entry;
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 85a96a3fddaa..b17d4203806e 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -59,7 +59,7 @@
* non-zero on failure.
*
*/
-int netlbl_netlink_init(void)
+int __init netlbl_netlink_init(void)
{
int ret_val;
@@ -96,7 +96,6 @@ int netlbl_netlink_init(void)
struct audit_buffer *netlbl_audit_start_common(int type,
struct netlbl_audit *audit_info)
{
- struct audit_context *audit_ctx = current->audit_context;
struct audit_buffer *audit_buf;
char *secctx;
u32 secctx_len;
@@ -104,7 +103,7 @@ struct audit_buffer *netlbl_audit_start_common(int type,
if (audit_enabled == 0)
return NULL;
- audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, type);
+ audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
if (audit_buf == NULL)
return NULL;
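The __init annotations added to netlbl_unlabel_init(), netlbl_unlabel_defconf() and netlbl_netlink_init() place those functions in the init text section, so their code is discarded once boot completes; that is only safe because they are reached solely from NetLabel's own initialisation path. The general shape, as a minimal sketch with a hypothetical subsystem:

/* Called once from the boot-time init path and never again, so the
 * code may live in .init.text and be freed after boot. */
static int __init example_subsys_init(void)
{
	/* one-shot setup only; must not be reachable at runtime */
	return 0;
}
subsys_initcall(example_subsys_init);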
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 150579a21469..d16929c9b4bc 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -230,10 +230,8 @@ static void genl_unregister_mc_groups(struct genl_family *family)
{
struct genl_multicast_group *grp, *tmp;
- genl_lock();
list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
__genl_unregister_mc_group(family, grp);
- genl_unlock();
}
/**
@@ -396,10 +394,10 @@ int genl_unregister_family(struct genl_family *family)
{
struct genl_family *rc;
- genl_unregister_mc_groups(family);
-
genl_lock();
+ genl_unregister_mc_groups(family);
+
list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
if (family->id != rc->id || strcmp(rc->name, family->name))
continue;
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 1a47f5d1be17..140a0a8c6b02 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -232,7 +232,7 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
struct rfkill *rfkill = to_rfkill(dev);
if (dev->power.power_state.event != state.event) {
- if (state.event == PM_EVENT_SUSPEND) {
+ if (state.event & PM_EVENT_SLEEP) {
mutex_lock(&rfkill->mutex);
if (rfkill->state == RFKILL_STATE_ON)
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 92a87fde8bfe..bdfb77417794 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -156,8 +156,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
false);
spin_unlock(&call->lock);
notification = NULL;
- if (ret < 0)
- BUG();
+ BUG_ON(ret < 0);
}
spin_unlock(&call->conn->state_lock);
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index 657ee69f2133..3ac1672e1070 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -814,8 +814,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
spin_lock_bh(&call->lock);
ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
spin_unlock_bh(&call->lock);
- if (ret < 0)
- BUG();
+ BUG_ON(ret < 0);
}
return 0;
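Both rxrpc hunks replace an open-coded "if (ret < 0) BUG();" with BUG_ON(ret < 0). The two forms are equivalent; BUG_ON() simply keeps the assertion on one line and reads as the assertion it is:

/* before: two lines, easy to misread as a normal error path */
if (ret < 0)
	BUG();

/* after: clearly an assertion */
BUG_ON(ret < 0);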
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d47d5787e2e5..44797ad88a05 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6488,6 +6488,7 @@ struct proto sctp_prot = {
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
+ .sockets_allocated = &sctp_sockets_allocated,
REF_PROTO_INUSE(sctp)
};
@@ -6521,6 +6522,7 @@ struct proto sctpv6_prot = {
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
+ .sockets_allocated = &sctp_sockets_allocated,
REF_PROTO_INUSE(sctpv6)
};
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
diff --git a/net/socket.c b/net/socket.c
index 7651de008502..b6d35cd72a50 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -701,6 +701,9 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
{
struct socket *sock = file->private_data;
+ if (unlikely(!sock->ops->splice_read))
+ return -EINVAL;
+
return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 6dac38792288..ef6384961808 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -266,6 +266,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
BUG_ON(!list_empty(&gss_msg->list));
if (gss_msg->ctx != NULL)
gss_put_ctx(gss_msg->ctx);
+ rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
kfree(gss_msg);
}
@@ -408,13 +409,13 @@ gss_refresh_upcall(struct rpc_task *task)
}
spin_lock(&inode->i_lock);
if (gss_cred->gc_upcall != NULL)
- rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
+ rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
task->tk_timeout = 0;
gss_cred->gc_upcall = gss_msg;
/* gss_upcall_callback will release the reference to gss_upcall_msg */
atomic_inc(&gss_msg->count);
- rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
+ rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
} else
err = gss_msg->msg.errno;
spin_unlock(&inode->i_lock);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index a23512bb240d..ea14314331b0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1066,7 +1066,7 @@ call_transmit(struct rpc_task *task)
if (task->tk_msg.rpc_proc->p_decode != NULL)
return;
task->tk_action = rpc_exit_task;
- rpc_wake_up_task(task);
+ rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}
/*
@@ -1535,7 +1535,7 @@ void rpc_show_tasks(void)
proc = -1;
if (RPC_IS_QUEUED(t))
- rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
+ rpc_waitq = rpc_qname(t->tk_waitqueue);
printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
t->tk_pid, proc,
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0e3ead7e11b9..1b395a41a8b2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -495,7 +495,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
static void
rpc_release_path(struct nameidata *nd)
{
- path_release(nd);
+ path_put(&nd->path);
rpc_put_mount();
}
@@ -668,7 +668,8 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
if ((error = rpc_lookup_parent(path, nd)) != 0)
return ERR_PTR(error);
- dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
+ dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
+ 1);
if (IS_ERR(dentry))
rpc_release_path(nd);
return dentry;
@@ -695,7 +696,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
dentry = rpc_lookup_negative(path, &nd);
if (IS_ERR(dentry))
return dentry;
- dir = nd.dentry->d_inode;
+ dir = nd.path.dentry->d_inode;
if ((error = __rpc_mkdir(dir, dentry)) != 0)
goto err_dput;
RPC_I(dentry->d_inode)->private = rpc_client;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3164a0871cf0..f480c718b400 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -298,7 +298,7 @@ void rpcb_getport_async(struct rpc_task *task)
/* Put self on queue before sending rpcbind request, in case
* rpcb_getport_done completes before we return from rpc_run_task */
- rpc_sleep_on(&xprt->binding, task, NULL, NULL);
+ rpc_sleep_on(&xprt->binding, task, NULL);
/* Someone else may have bound if we slept */
if (xprt_bound(xprt)) {
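The rpc_sleep_on() calls in auth_gss.c, rpcb_clnt.c and xprt.c all lose their fourth argument: per-task timeout callbacks go away, and expiry is handled by the wait queue's own timer introduced in the sched.c changes below. The conversion is mechanical:

/* before: the caller could pass a private timer callback */
rpc_sleep_on(&xprt->binding, task, NULL, NULL);

/* after: task->tk_timeout still sets the timeout, but expiry is
 * driven by the queue timer, so no callback argument remains */
rpc_sleep_on(&xprt->binding, task, NULL);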
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4c669121e607..cae219c8caeb 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -38,9 +38,9 @@ static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
-static void __rpc_default_timer(struct rpc_task *task);
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
+static void __rpc_queue_timer_fn(unsigned long ptr);
/*
* RPC tasks sit here while waiting for conditions to improve.
@@ -57,41 +57,30 @@ struct workqueue_struct *rpciod_workqueue;
* queue->lock and bh_disabled in order to avoid races within
* rpc_run_timer().
*/
-static inline void
-__rpc_disable_timer(struct rpc_task *task)
+static void
+__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
+ if (task->tk_timeout == 0)
+ return;
dprintk("RPC: %5u disabling timer\n", task->tk_pid);
- task->tk_timeout_fn = NULL;
task->tk_timeout = 0;
+ list_del(&task->u.tk_wait.timer_list);
+ if (list_empty(&queue->timer_list.list))
+ del_timer(&queue->timer_list.timer);
}
-/*
- * Run a timeout function.
- * We use the callback in order to allow __rpc_wake_up_task()
- * and friends to disable the timer synchronously on SMP systems
- * without calling del_timer_sync(). The latter could cause a
- * deadlock if called while we're holding spinlocks...
- */
-static void rpc_run_timer(struct rpc_task *task)
+static void
+rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
- void (*callback)(struct rpc_task *);
-
- callback = task->tk_timeout_fn;
- task->tk_timeout_fn = NULL;
- if (callback && RPC_IS_QUEUED(task)) {
- dprintk("RPC: %5u running timer\n", task->tk_pid);
- callback(task);
- }
- smp_mb__before_clear_bit();
- clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
- smp_mb__after_clear_bit();
+ queue->timer_list.expires = expires;
+ mod_timer(&queue->timer_list.timer, expires);
}
/*
* Set up a timer for the current task.
*/
-static inline void
-__rpc_add_timer(struct rpc_task *task, rpc_action timer)
+static void
+__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (!task->tk_timeout)
return;
@@ -99,27 +88,10 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer)
dprintk("RPC: %5u setting alarm for %lu ms\n",
task->tk_pid, task->tk_timeout * 1000 / HZ);
- if (timer)
- task->tk_timeout_fn = timer;
- else
- task->tk_timeout_fn = __rpc_default_timer;
- set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
- mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
-}
-
-/*
- * Delete any timer for the current task. Because we use del_timer_sync(),
- * this function should never be called while holding queue->lock.
- */
-static void
-rpc_delete_timer(struct rpc_task *task)
-{
- if (RPC_IS_QUEUED(task))
- return;
- if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
- del_singleshot_timer_sync(&task->tk_timer);
- dprintk("RPC: %5u deleting timer\n", task->tk_pid);
- }
+ task->u.tk_wait.expires = jiffies + task->tk_timeout;
+ if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
+ rpc_set_queue_timer(queue, task->u.tk_wait.expires);
+ list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
/*
@@ -161,7 +133,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *
list_add(&task->u.tk_wait.list, &queue->tasks[0]);
else
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
- task->u.tk_wait.rpc_waitq = queue;
+ task->tk_waitqueue = queue;
queue->qlen++;
rpc_set_queued(task);
@@ -181,22 +153,18 @@ static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
}
- list_del(&task->u.tk_wait.list);
}
/*
* Remove request from queue.
* Note: must be called with spin lock held.
*/
-static void __rpc_remove_wait_queue(struct rpc_task *task)
+static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
- struct rpc_wait_queue *queue;
- queue = task->u.tk_wait.rpc_waitq;
-
+ __rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
- else
- list_del(&task->u.tk_wait.list);
+ list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
@@ -229,6 +197,9 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
INIT_LIST_HEAD(&queue->tasks[i]);
queue->maxpriority = nr_queues - 1;
rpc_reset_waitqueue_priority(queue);
+ queue->qlen = 0;
+ setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
+ INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
queue->name = qname;
#endif
@@ -245,6 +216,12 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
+void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
+{
+ del_timer_sync(&queue->timer_list.timer);
+}
+EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
+
static int rpc_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
@@ -313,7 +290,6 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
*/
static void rpc_make_runnable(struct rpc_task *task)
{
- BUG_ON(task->tk_timeout_fn);
rpc_clear_queued(task);
if (rpc_test_and_set_running(task))
return;
@@ -326,7 +302,7 @@ static void rpc_make_runnable(struct rpc_task *task)
int status;
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
- status = queue_work(task->tk_workqueue, &task->u.tk_work);
+ status = queue_work(rpciod_workqueue, &task->u.tk_work);
if (status < 0) {
printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
task->tk_status = status;
@@ -343,7 +319,7 @@ static void rpc_make_runnable(struct rpc_task *task)
* as it's on a wait queue.
*/
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
- rpc_action action, rpc_action timer)
+ rpc_action action)
{
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
@@ -357,11 +333,11 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
BUG_ON(task->tk_callback != NULL);
task->tk_callback = action;
- __rpc_add_timer(task, timer);
+ __rpc_add_timer(q, task);
}
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
- rpc_action action, rpc_action timer)
+ rpc_action action)
{
/* Mark the task as being activated if so needed */
rpc_set_active(task);
@@ -370,18 +346,19 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
* Protect the queue operations.
*/
spin_lock_bh(&q->lock);
- __rpc_sleep_on(q, task, action, timer);
+ __rpc_sleep_on(q, task, action);
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
+ * @queue: wait queue
* @task: task to be woken up
*
* Caller must hold queue->lock, and have cleared the task queued flag.
*/
-static void __rpc_do_wake_up_task(struct rpc_task *task)
+static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
task->tk_pid, jiffies);
@@ -395,8 +372,7 @@ static void __rpc_do_wake_up_task(struct rpc_task *task)
return;
}
- __rpc_disable_timer(task);
- __rpc_remove_wait_queue(task);
+ __rpc_remove_wait_queue(queue, task);
rpc_make_runnable(task);
@@ -404,48 +380,32 @@ static void __rpc_do_wake_up_task(struct rpc_task *task)
}
/*
- * Wake up the specified task
+ * Wake up a queued task while the queue lock is being held
*/
-static void __rpc_wake_up_task(struct rpc_task *task)
+static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
- if (rpc_start_wakeup(task)) {
- if (RPC_IS_QUEUED(task))
- __rpc_do_wake_up_task(task);
- rpc_finish_wakeup(task);
- }
+ if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
+ __rpc_do_wake_up_task(queue, task);
}
/*
- * Default timeout handler if none specified by user
+ * Wake up a task on a specific queue
*/
-static void
-__rpc_default_timer(struct rpc_task *task)
+void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
- dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
- task->tk_status = -ETIMEDOUT;
- rpc_wake_up_task(task);
+ spin_lock_bh(&queue->lock);
+ rpc_wake_up_task_queue_locked(queue, task);
+ spin_unlock_bh(&queue->lock);
}
+EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
/*
* Wake up the specified task
*/
-void rpc_wake_up_task(struct rpc_task *task)
+static void rpc_wake_up_task(struct rpc_task *task)
{
- rcu_read_lock_bh();
- if (rpc_start_wakeup(task)) {
- if (RPC_IS_QUEUED(task)) {
- struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
-
- /* Note: we're already in a bh-safe context */
- spin_lock(&queue->lock);
- __rpc_do_wake_up_task(task);
- spin_unlock(&queue->lock);
- }
- rpc_finish_wakeup(task);
- }
- rcu_read_unlock_bh();
+ rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
-EXPORT_SYMBOL_GPL(rpc_wake_up_task);
/*
* Wake up the next task on a priority queue.
@@ -495,7 +455,7 @@ new_queue:
new_owner:
rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
- __rpc_wake_up_task(task);
+ rpc_wake_up_task_queue_locked(queue, task);
return task;
}
@@ -508,16 +468,14 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
dprintk("RPC: wake_up_next(%p \"%s\")\n",
queue, rpc_qname(queue));
- rcu_read_lock_bh();
- spin_lock(&queue->lock);
+ spin_lock_bh(&queue->lock);
if (RPC_IS_PRIORITY(queue))
task = __rpc_wake_up_next_priority(queue);
else {
task_for_first(task, &queue->tasks[0])
- __rpc_wake_up_task(task);
+ rpc_wake_up_task_queue_locked(queue, task);
}
- spin_unlock(&queue->lock);
- rcu_read_unlock_bh();
+ spin_unlock_bh(&queue->lock);
return task;
}
@@ -534,18 +492,16 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
struct rpc_task *task, *next;
struct list_head *head;
- rcu_read_lock_bh();
- spin_lock(&queue->lock);
+ spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list)
- __rpc_wake_up_task(task);
+ rpc_wake_up_task_queue_locked(queue, task);
if (head == &queue->tasks[0])
break;
head--;
}
- spin_unlock(&queue->lock);
- rcu_read_unlock_bh();
+ spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
@@ -561,26 +517,48 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
struct rpc_task *task, *next;
struct list_head *head;
- rcu_read_lock_bh();
- spin_lock(&queue->lock);
+ spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
task->tk_status = status;
- __rpc_wake_up_task(task);
+ rpc_wake_up_task_queue_locked(queue, task);
}
if (head == &queue->tasks[0])
break;
head--;
}
- spin_unlock(&queue->lock);
- rcu_read_unlock_bh();
+ spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
+static void __rpc_queue_timer_fn(unsigned long ptr)
+{
+ struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
+ struct rpc_task *task, *n;
+ unsigned long expires, now, timeo;
+
+ spin_lock(&queue->lock);
+ expires = now = jiffies;
+ list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
+ timeo = task->u.tk_wait.expires;
+ if (time_after_eq(now, timeo)) {
+ dprintk("RPC: %5u timeout\n", task->tk_pid);
+ task->tk_status = -ETIMEDOUT;
+ rpc_wake_up_task_queue_locked(queue, task);
+ continue;
+ }
+ if (expires == now || time_after(expires, timeo))
+ expires = timeo;
+ }
+ if (!list_empty(&queue->timer_list.list))
+ rpc_set_queue_timer(queue, expires);
+ spin_unlock(&queue->lock);
+}
+
static void __rpc_atrun(struct rpc_task *task)
{
- rpc_wake_up_task(task);
+ task->tk_status = 0;
}
/*
@@ -589,7 +567,7 @@ static void __rpc_atrun(struct rpc_task *task)
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
task->tk_timeout = delay;
- rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
+ rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
@@ -644,10 +622,6 @@ static void __rpc_execute(struct rpc_task *task)
BUG_ON(RPC_IS_QUEUED(task));
for (;;) {
- /*
- * Garbage collection of pending timers...
- */
- rpc_delete_timer(task);
/*
* Execute any pending callback.
@@ -816,8 +790,6 @@ EXPORT_SYMBOL_GPL(rpc_free);
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
memset(task, 0, sizeof(*task));
- setup_timer(&task->tk_timer, (void (*)(unsigned long))rpc_run_timer,
- (unsigned long)task);
atomic_set(&task->tk_count, 1);
task->tk_flags = task_setup_data->flags;
task->tk_ops = task_setup_data->callback_ops;
@@ -832,7 +804,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
task->tk_owner = current->tgid;
/* Initialize workqueue for async tasks */
- task->tk_workqueue = rpciod_workqueue;
+ task->tk_workqueue = task_setup_data->workqueue;
task->tk_client = task_setup_data->rpc_client;
if (task->tk_client != NULL) {
@@ -868,13 +840,6 @@ rpc_alloc_task(void)
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
-static void rpc_free_task(struct rcu_head *rcu)
-{
- struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
- dprintk("RPC: %5u freeing task\n", task->tk_pid);
- mempool_free(task, rpc_task_mempool);
-}
-
/*
* Create a new task for the specified client.
*/
@@ -898,12 +863,25 @@ out:
return task;
}
-
-void rpc_put_task(struct rpc_task *task)
+static void rpc_free_task(struct rpc_task *task)
{
const struct rpc_call_ops *tk_ops = task->tk_ops;
void *calldata = task->tk_calldata;
+ if (task->tk_flags & RPC_TASK_DYNAMIC) {
+ dprintk("RPC: %5u freeing task\n", task->tk_pid);
+ mempool_free(task, rpc_task_mempool);
+ }
+ rpc_release_calldata(tk_ops, calldata);
+}
+
+static void rpc_async_release(struct work_struct *work)
+{
+ rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
if (!atomic_dec_and_test(&task->tk_count))
return;
/* Release resources */
@@ -915,9 +893,11 @@ void rpc_put_task(struct rpc_task *task)
rpc_release_client(task->tk_client);
task->tk_client = NULL;
}
- if (task->tk_flags & RPC_TASK_DYNAMIC)
- call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
- rpc_release_calldata(tk_ops, calldata);
+ if (task->tk_workqueue != NULL) {
+ INIT_WORK(&task->u.tk_work, rpc_async_release);
+ queue_work(task->tk_workqueue, &task->u.tk_work);
+ } else
+ rpc_free_task(task);
}
EXPORT_SYMBOL_GPL(rpc_put_task);
@@ -937,9 +917,6 @@ static void rpc_release_task(struct rpc_task *task)
}
BUG_ON (RPC_IS_QUEUED(task));
- /* Synchronously delete any running timer */
- rpc_delete_timer(task);
-
#ifdef RPC_DEBUG
task->tk_magic = 0;
#endif
@@ -1029,11 +1006,20 @@ rpc_destroy_mempool(void)
kmem_cache_destroy(rpc_task_slabp);
if (rpc_buffer_slabp)
kmem_cache_destroy(rpc_buffer_slabp);
+ rpc_destroy_wait_queue(&delay_queue);
}
int
rpc_init_mempool(void)
{
+ /*
+ * The following is not strictly a mempool initialisation,
+ * but there is no harm in doing it here
+ */
+ rpc_init_wait_queue(&delay_queue, "delayq");
+ if (!rpciod_start())
+ goto err_nomem;
+
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
@@ -1054,13 +1040,6 @@ rpc_init_mempool(void)
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
- if (!rpciod_start())
- goto err_nomem;
- /*
- * The following is not strictly a mempool initialisation,
- * but there is no harm in doing it here
- */
- rpc_init_wait_queue(&delay_queue, "delayq");
return 0;
err_nomem:
rpc_destroy_mempool();
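The sched.c rework above replaces the per-task timer with one timer per rpc_wait_queue: a sleeping task with a timeout is linked on queue->timer_list.list with an absolute expiry, the queue timer is armed for the earliest of those expiries, and __rpc_queue_timer_fn() wakes every task whose deadline has passed before re-arming for the next one. A much reduced sketch of that shape; the type and field names here are simplified stand-ins, not the kernel's exact layout:

struct example_waiter {
	struct list_head timer_list;	/* link on the queue's pending list */
	unsigned long expires;		/* absolute deadline, in jiffies */
};

struct example_queue {
	spinlock_t lock;
	struct list_head timer_list;	/* waiters that asked for a timeout */
	struct timer_list timer;	/* one timer shared by all of them */
	unsigned long expires;		/* deadline the timer is armed for */
};

/* (Re)arm the shared timer for the earliest pending deadline. */
static void example_set_queue_timer(struct example_queue *q,
				    unsigned long expires)
{
	q->expires = expires;
	mod_timer(&q->timer, expires);
}

/* Queue a waiter; the shared timer only ever moves earlier, never later. */
static void example_add_timer(struct example_queue *q,
			      struct example_waiter *w,
			      unsigned long timeout)
{
	w->expires = jiffies + timeout;
	if (list_empty(&q->timer_list) ||
	    time_before(w->expires, q->expires))
		example_set_queue_timer(q, w->expires);
	list_add(&w->timer_list, &q->timer_list);
}

The pay-off is visible in the rest of the file: rpc_delete_timer(), the per-task tk_timer setup and the RCU-deferred task freeing all disappear.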
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 1d3e5fcc2cc4..c475977de05a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -175,7 +175,7 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
size_t base = xdr->page_base;
unsigned int pglen = xdr->page_len;
unsigned int flags = MSG_MORE;
- char buf[RPC_MAX_ADDRBUFLEN];
+ RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
slen = xdr->len;
@@ -716,7 +716,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
struct socket *newsock;
struct svc_sock *newsvsk;
int err, slen;
- char buf[RPC_MAX_ADDRBUFLEN];
+ RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
if (!sock)
@@ -1206,10 +1206,10 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
struct socket *sock;
int error;
int type;
- char buf[RPC_MAX_ADDRBUFLEN];
struct sockaddr_storage addr;
struct sockaddr *newsin = (struct sockaddr *)&addr;
int newlen;
+ RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("svc: svc_create_socket(%s, %d, %s)\n",
serv->sv_program->pg_name, protocol,
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d5553b8179f9..85199c647022 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -188,9 +188,9 @@ out_sleep:
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
if (req && req->rq_ntrans)
- rpc_sleep_on(&xprt->resend, task, NULL, NULL);
+ rpc_sleep_on(&xprt->resend, task, NULL);
else
- rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+ rpc_sleep_on(&xprt->sending, task, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
@@ -238,9 +238,9 @@ out_sleep:
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
if (req && req->rq_ntrans)
- rpc_sleep_on(&xprt->resend, task, NULL, NULL);
+ rpc_sleep_on(&xprt->resend, task, NULL);
else
- rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+ rpc_sleep_on(&xprt->sending, task, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -453,7 +453,7 @@ void xprt_wait_for_buffer_space(struct rpc_task *task)
struct rpc_xprt *xprt = req->rq_xprt;
task->tk_timeout = req->rq_timeout;
- rpc_sleep_on(&xprt->pending, task, NULL, NULL);
+ rpc_sleep_on(&xprt->pending, task, NULL);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
@@ -472,7 +472,7 @@ void xprt_write_space(struct rpc_xprt *xprt)
if (xprt->snd_task) {
dprintk("RPC: write space: waking waiting task on "
"xprt %p\n", xprt);
- rpc_wake_up_task(xprt->snd_task);
+ rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
}
spin_unlock_bh(&xprt->transport_lock);
}
@@ -602,8 +602,7 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
/* Try to schedule an autoclose RPC call */
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
queue_work(rpciod_workqueue, &xprt->task_cleanup);
- else if (xprt->snd_task != NULL)
- rpc_wake_up_task(xprt->snd_task);
+ xprt_wake_pending_tasks(xprt, -ENOTCONN);
spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -653,7 +652,7 @@ void xprt_connect(struct rpc_task *task)
task->tk_rqstp->rq_bytes_sent = 0;
task->tk_timeout = xprt->connect_timeout;
- rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
+ rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
xprt->stat.connect_start = jiffies;
xprt->ops->connect(task);
}
@@ -749,18 +748,19 @@ EXPORT_SYMBOL_GPL(xprt_update_rtt);
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
task->tk_pid, ntohl(req->rq_xid), copied);
- task->tk_xprt->stat.recvs++;
+ xprt->stat.recvs++;
task->tk_rtt = (long)jiffies - req->rq_xtime;
list_del_init(&req->rq_list);
/* Ensure all writes are done before we update req->rq_received */
smp_wmb();
req->rq_received = req->rq_private_buf.len = copied;
- rpc_wake_up_task(task);
+ rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
@@ -769,17 +769,17 @@ static void xprt_timer(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
+ if (task->tk_status != -ETIMEDOUT)
+ return;
dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
- spin_lock(&xprt->transport_lock);
+ spin_lock_bh(&xprt->transport_lock);
if (!req->rq_received) {
if (xprt->ops->timer)
xprt->ops->timer(task);
- task->tk_status = -ETIMEDOUT;
- }
- task->tk_timeout = 0;
- rpc_wake_up_task(task);
- spin_unlock(&xprt->transport_lock);
+ } else
+ task->tk_status = 0;
+ spin_unlock_bh(&xprt->transport_lock);
}
/**
@@ -864,7 +864,7 @@ void xprt_transmit(struct rpc_task *task)
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
else if (!req->rq_received)
- rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
+ rpc_sleep_on(&xprt->pending, task, xprt_timer);
spin_unlock_bh(&xprt->transport_lock);
return;
}
@@ -875,7 +875,7 @@ void xprt_transmit(struct rpc_task *task)
*/
task->tk_status = status;
if (status == -ECONNREFUSED)
- rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+ rpc_sleep_on(&xprt->sending, task, NULL);
}
static inline void do_xprt_reserve(struct rpc_task *task)
@@ -895,7 +895,7 @@ static inline void do_xprt_reserve(struct rpc_task *task)
dprintk("RPC: waiting for request slot\n");
task->tk_status = -EAGAIN;
task->tk_timeout = 0;
- rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
+ rpc_sleep_on(&xprt->backlog, task, NULL);
}
/**
@@ -1052,6 +1052,11 @@ static void xprt_destroy(struct kref *kref)
xprt->shutdown = 1;
del_timer_sync(&xprt->timer);
+ rpc_destroy_wait_queue(&xprt->binding);
+ rpc_destroy_wait_queue(&xprt->pending);
+ rpc_destroy_wait_queue(&xprt->sending);
+ rpc_destroy_wait_queue(&xprt->resend);
+ rpc_destroy_wait_queue(&xprt->backlog);
/*
* Tear down transport state and free the rpc_xprt
*/
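Because an rpc_wait_queue now carries its own timer, any object that embeds wait queues has to run rpc_destroy_wait_queue() on each of them before the memory is freed, or a still-armed queue timer could fire on freed memory; xprt_destroy() gains exactly those calls above (as does gss_release_msg() earlier in the series). A minimal sketch of the rule, with a hypothetical owning structure:

struct example_transport {
	struct rpc_wait_queue pending;	/* embedded wait queue */
};

static void example_transport_destroy(struct example_transport *xp)
{
	/* del_timer_sync() the queue timer before the memory goes away */
	rpc_destroy_wait_queue(&xp->pending);
	kfree(xp);
}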
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 30e7ac243a90..8bd3b0f73ac0 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1073,6 +1073,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
struct rpc_xprt *xprt;
read_descriptor_t rd_desc;
+ int read;
dprintk("RPC: xs_tcp_data_ready...\n");
@@ -1084,8 +1085,10 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
rd_desc.arg.data = xprt;
- rd_desc.count = 65536;
- tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
+ do {
+ rd_desc.count = 65536;
+ read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
+ } while (read > 0);
out:
read_unlock(&sk->sk_callback_lock);
}
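xs_tcp_data_ready() previously made a single tcp_read_sock() pass with rd_desc.count capped at 64 KiB, so a larger burst could be left sitting on the socket until the next callback. tcp_read_sock() returns how many bytes it handed to the receive callback, so looping while it makes progress drains the whole backlog in one go:

/* keep consuming 64 KiB windows until tcp_read_sock() reports that
 * no further data was passed to xs_tcp_data_recv() */
do {
	rd_desc.count = 65536;
	read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
} while (read > 0);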
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index eea75888805e..b8788fd5e3c6 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -718,16 +718,16 @@ static struct sock *unix_find_other(struct net *net,
goto put_fail;
err = -ECONNREFUSED;
- if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
+ if (!S_ISSOCK(nd.path.dentry->d_inode->i_mode))
goto put_fail;
- u=unix_find_socket_byinode(net, nd.dentry->d_inode);
+ u = unix_find_socket_byinode(net, nd.path.dentry->d_inode);
if (!u)
goto put_fail;
if (u->sk_type == type)
- touch_atime(nd.mnt, nd.dentry);
+ touch_atime(nd.path.mnt, nd.path.dentry);
- path_release(&nd);
+ path_put(&nd.path);
err=-EPROTOTYPE;
if (u->sk_type != type) {
@@ -748,7 +748,7 @@ static struct sock *unix_find_other(struct net *net,
return u;
put_fail:
- path_release(&nd);
+ path_put(&nd.path);
fail:
*error=err;
return NULL;
@@ -819,12 +819,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
*/
mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current->fs->umask);
- err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
+ err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
if (err)
goto out_mknod_dput;
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
- dput(nd.dentry);
- nd.dentry = dentry;
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ dput(nd.path.dentry);
+ nd.path.dentry = dentry;
addr->hash = UNIX_HASH_SIZE;
}
@@ -842,8 +842,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
list = &unix_socket_table[addr->hash];
} else {
list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
- u->dentry = nd.dentry;
- u->mnt = nd.mnt;
+ u->dentry = nd.path.dentry;
+ u->mnt = nd.path.mnt;
}
err = 0;
@@ -861,8 +861,8 @@ out:
out_mknod_dput:
dput(dentry);
out_mknod_unlock:
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
- path_release(&nd);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ path_put(&nd.path);
out_mknod_parent:
if (err==-EEXIST)
err=-EADDRINUSE;
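The af_unix.c hunks (like the rpc_pipe.c ones earlier) are part of the tree-wide move to a struct path embedded in struct nameidata: nd.dentry and nd.mnt become nd.path.dentry and nd.path.mnt, and path_release(&nd) becomes path_put(&nd.path). An illustrative fragment of the same conversion:

/* before: dentry and vfsmount were direct members of struct nameidata */
touch_atime(nd.mnt, nd.dentry);
path_release(&nd);

/* after: both live in the embedded struct path, dropped via path_put() */
touch_atime(nd.path.mnt, nd.path.dentry);
path_put(&nd.path);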
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 8f9dbec319be..9201ef8ad90e 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -38,7 +38,7 @@ config XFRM_MIGRATE
config XFRM_STATISTICS
bool "Transformation statistics (EXPERIMENTAL)"
- depends on XFRM && PROC_FS && EXPERIMENTAL
+ depends on INET && XFRM && PROC_FS && EXPERIMENTAL
---help---
This statistics is not a SNMP/MIB specification but shows
statistics about transformation error (or almost error) factor
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 4d6ebc633a94..62188c6a06dd 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -109,7 +109,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
if (encap_type < 0) {
async = 1;
x = xfrm_input_state(skb);
- seq = XFRM_SKB_CB(skb)->seq;
+ seq = XFRM_SKB_CB(skb)->seq.input;
goto resume;
}
@@ -175,7 +175,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
spin_unlock(&x->lock);
- XFRM_SKB_CB(skb)->seq = seq;
+ XFRM_SKB_CB(skb)->seq.input = seq;
nexthdr = x->type->input(x, skb);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index fc690368325f..569d377932c4 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -62,7 +62,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
}
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
- XFRM_SKB_CB(skb)->seq = ++x->replay.oseq;
+ XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq;
if (unlikely(x->replay.oseq == 0)) {
XFRM_INC_STATS(LINUX_MIB_XFRMOUTSTATESEQERROR);
x->replay.oseq--;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 47219f98053f..9fc4c315f6cd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -331,15 +331,31 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
unsigned int nhashmask)
{
- struct hlist_node *entry, *tmp;
+ struct hlist_node *entry, *tmp, *entry0 = NULL;
struct xfrm_policy *pol;
+ unsigned int h0 = 0;
+redo:
hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
unsigned int h;
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask);
- hlist_add_head(&pol->bydst, ndsttable+h);
+ if (!entry0) {
+ hlist_del(entry);
+ hlist_add_head(&pol->bydst, ndsttable+h);
+ h0 = h;
+ } else {
+ if (h != h0)
+ continue;
+ hlist_del(entry);
+ hlist_add_after(entry0, &pol->bydst);
+ }
+ entry0 = entry;
+ }
+ if (!hlist_empty(list)) {
+ entry0 = NULL;
+ goto redo;
}
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 78338079b7f5..f971ca5645f8 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1105,6 +1105,7 @@ static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p,
return xp;
error:
*errp = err;
+ xp->dead = 1;
xfrm_policy_destroy(xp);
return NULL;
}