Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_sysfs.c | 41
-rw-r--r--  net/bridge/Kconfig | 1
-rw-r--r--  net/bridge/br_multicast.c | 8
-rw-r--r--  net/core/ethtool.c | 103
-rw-r--r--  net/core/sock.c | 19
-rw-r--r--  net/dccp/minisocks.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 7
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/route.c | 50
-rw-r--r--  net/ipv4/tcp_ipv4.c | 11
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 18
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv4/xfrm4_policy.c | 5
-rw-r--r--  net/ipv6/addrconf.c | 102
-rw-r--r--  net/ipv6/fib6_rules.c | 11
-rw-r--r--  net/ipv6/route.c | 11
-rw-r--r--  net/ipv6/tcp_ipv6.c | 7
-rw-r--r--  net/ipv6/udp.c | 28
-rw-r--r--  net/ipv6/xfrm6_policy.c | 3
-rw-r--r--  net/llc/llc_c_ac.c | 2
-rw-r--r--  net/llc/llc_conn.c | 3
-rw-r--r--  net/mac80211/debugfs_netdev.c | 10
-rw-r--r--  net/mac80211/main.c | 4
-rw-r--r--  net/mac80211/mesh.c | 3
-rw-r--r--  net/mac80211/mesh_hwmp.c | 4
-rw-r--r--  net/mac80211/mlme.c | 13
-rw-r--r--  net/mac80211/rx.c | 5
-rw-r--r--  net/mac80211/sta_info.c | 21
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/mac80211/util.c | 18
-rw-r--r--  net/packet/af_packet.c | 6
-rw-r--r--  net/sctp/input.c | 42
-rw-r--r--  net/sctp/socket.c | 3
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 7
-rw-r--r--  net/sunrpc/xprtsock.c | 9
-rw-r--r--  net/tipc/bearer.c | 37
-rw-r--r--  net/tipc/bearer.h | 2
-rw-r--r--  net/tipc/link.c | 9
-rw-r--r--  net/tipc/net.c | 25
-rw-r--r--  net/tipc/socket.c | 6
-rw-r--r--  net/tipc/subscr.c | 57
-rw-r--r--  net/tipc/subscr.h | 2
-rw-r--r--  net/wireless/reg.c | 12
-rw-r--r--  net/x25/x25_dev.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 7
46 files changed, 432 insertions(+), 320 deletions(-)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 1a79a6c7e30e..cafb55b0cea5 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -405,20 +406,11 @@ static struct device_type bt_host = {
.release = bt_host_release,
};
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static int inquiry_cache_show(struct seq_file *f, void *p)
{
- struct hci_dev *hdev = file->private_data;
+ struct hci_dev *hdev = f->private;
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
- char buf[4096];
- int n = 0;
hci_dev_lock_bh(hdev);
@@ -426,23 +418,30 @@ static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
struct inquiry_data *data = &e->data;
bdaddr_t bdaddr;
baswap(&bdaddr, &data->bdaddr);
- n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
- batostr(&bdaddr),
- data->pscan_rep_mode, data->pscan_period_mode,
- data->pscan_mode, data->dev_class[2],
- data->dev_class[1], data->dev_class[0],
- __le16_to_cpu(data->clock_offset),
- data->rssi, data->ssp_mode, e->timestamp);
+ seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+ batostr(&bdaddr),
+ data->pscan_rep_mode, data->pscan_period_mode,
+ data->pscan_mode, data->dev_class[2],
+ data->dev_class[1], data->dev_class[0],
+ __le16_to_cpu(data->clock_offset),
+ data->rssi, data->ssp_mode, e->timestamp);
}
hci_dev_unlock_bh(hdev);
- return simple_read_from_buffer(userbuf, count, ppos, buf, n);
+ return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, inquiry_cache_show, inode->i_private);
}
static const struct file_operations inquiry_cache_fops = {
- .open = inquiry_cache_open,
- .read = inquiry_cache_read,
+ .open = inquiry_cache_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
int hci_register_sysfs(struct hci_dev *hdev)
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 19a6b9629c51..d115d5cea5b6 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -35,6 +35,7 @@ config BRIDGE
config BRIDGE_IGMP_SNOOPING
bool "IGMP snooping"
depends on BRIDGE
+ depends on INET
default y
---help---
If you say Y here, then the Ethernet bridge will be able selectively
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2559fb539836..12ce1eaa4f3e 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -38,7 +38,7 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
struct net_bridge_mdb_entry *mp;
struct hlist_node *p;
- hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+ hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
if (dst == mp->addr)
return mp;
}
@@ -627,8 +627,8 @@ static void br_multicast_port_query_expired(unsigned long data)
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
- if (port && (port->state == BR_STATE_DISABLED ||
- port->state == BR_STATE_BLOCKING))
+ if (port->state == BR_STATE_DISABLED ||
+ port->state == BR_STATE_BLOCKING)
goto out;
if (port->multicast_startup_queries_sent <
@@ -1135,7 +1135,7 @@ void br_multicast_stop(struct net_bridge *br)
if (mdb->old) {
spin_unlock_bh(&br->multicast_lock);
- synchronize_rcu_bh();
+ rcu_barrier_bh();
spin_lock_bh(&br->multicast_lock);
WARN_ON(mdb->old);
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0f2f82185ec4..f4cb6b6299d9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
/*
@@ -199,10 +200,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_settings(dev, &cmd);
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
{
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -214,6 +212,10 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
info.cmd = ETHTOOL_GDRVINFO;
ops->get_drvinfo(dev, &info);
+ /*
+ * this method of obtaining string set info is deprecated;
+ * Use ETHTOOL_GSSET_INFO instead.
+ */
if (ops->get_sset_count) {
int rc;
@@ -237,10 +239,67 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
return 0;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
+ void __user *useraddr)
+{
+ struct ethtool_sset_info info;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u64 sset_mask;
+ int i, idx = 0, n_bits = 0, ret, rc;
+ u32 *info_buf = NULL;
+
+ if (!ops->get_sset_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&info, useraddr, sizeof(info)))
+ return -EFAULT;
+
+ /* store copy of mask, because we zero struct later on */
+ sset_mask = info.sset_mask;
+ if (!sset_mask)
+ return 0;
+
+ /* calculate size of return buffer */
+ n_bits = hweight64(sset_mask);
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GSSET_INFO;
+
+ info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
+ if (!info_buf)
+ return -ENOMEM;
+
+ /*
+ * fill return buffer based on input bitmask and successful
+ * get_sset_count return
+ */
+ for (i = 0; i < 64; i++) {
+ if (!(sset_mask & (1ULL << i)))
+ continue;
+
+ rc = ops->get_sset_count(dev, i);
+ if (rc >= 0) {
+ info.sset_mask |= (1ULL << i);
+ info_buf[idx++] = rc;
+ }
+ }
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ goto out;
+
+ useraddr += offsetof(struct ethtool_sset_info, data);
+ if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
+ goto out;
+
+ ret = 0;
+
+out:
+ kfree(info_buf);
+ return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc cmd;
@@ -253,10 +312,7 @@ static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *usera
return dev->ethtool_ops->set_rxnfc(dev, &cmd);
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -328,10 +384,7 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
list->count++;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rx_ntuple cmd;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -799,10 +852,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
{
struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
@@ -816,10 +866,7 @@ static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *us
return 0;
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
{
struct ethtool_coalesce coalesce;
@@ -1229,10 +1276,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
return actor(dev, edata.data);
}
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
{
struct ethtool_flash efl;
@@ -1471,6 +1515,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXNTUPLE:
rc = ethtool_get_rx_ntuple(dev, useraddr);
break;
+ case ETHTOOL_GSSET_INFO:
+ rc = ethtool_get_sset_info(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index fcd397a762ff..c5812bbc2cc9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else
- sk_add_backlog(sk, skb);
+ } else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+
bh_unlock_sock(sk);
out:
sock_put(sk);
@@ -1139,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
sock_lock_init(newsk);
bh_lock_sock(newsk);
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+ newsk->sk_backlog.len = 0;
atomic_set(&newsk->sk_rmem_alloc, 0);
/*
@@ -1542,6 +1547,12 @@ static void __release_sock(struct sock *sk)
bh_lock_sock(sk);
} while ((skb = sk->sk_backlog.head) != NULL);
+
+ /*
+ * Doing the zeroing here guarantee we can not loop forever
+ * while a wild producer attempts to flood us.
+ */
+ sk->sk_backlog.len = 0;
}
/**
@@ -1874,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
@@ -2276,7 +2288,8 @@ out_free_request_sock_slab:
prot->rsk_prot->slab = NULL;
}
out_free_request_sock_slab_name:
- kfree(prot->rsk_prot->slab_name);
+ if (prot->rsk_prot)
+ kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a063141..0d508c359fa9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
* in main socket hash table and lock on listening
* socket does not protect us more.
*/
- sk_add_backlog(child, skb);
+ __sk_add_backlog(child, skb);
}
bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c0c5274d0271..f47c9f76754b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1144,12 +1144,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
if (saddr)
memcpy(&iph->saddr, saddr, 4);
-
- if (daddr) {
+ if (daddr)
memcpy(&iph->daddr, daddr, 4);
- return t->hlen;
- }
- if (iph->daddr && !ipv4_is_multicast(iph->daddr))
+ if (iph->daddr)
return t->hlen;
return -t->hlen;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 242ed2307370..4f1f337f4337 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
+ SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
+ SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b2ba5581d2ae..d9b40248b97f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -146,7 +146,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
-static void rt_emergency_hash_rebuild(struct net *net);
static struct dst_ops ipv4_dst_ops = {
@@ -780,11 +779,30 @@ static void rt_do_flush(int process_context)
#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
+/*
+ * Given a hash chain and an item in this hash chain,
+ * find if a previous entry has the same hash_inputs
+ * (but differs on tos, mark or oif)
+ * Returns 0 if an alias is found.
+ * Returns ONE if rth has no alias before itself.
+ */
+static int has_noalias(const struct rtable *head, const struct rtable *rth)
+{
+ const struct rtable *aux = head;
+
+ while (aux != rth) {
+ if (compare_hash_inputs(&aux->fl, &rth->fl))
+ return 0;
+ aux = aux->u.dst.rt_next;
+ }
+ return ONE;
+}
+
static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
- struct rtable *rth, *aux, **rthp;
+ struct rtable *rth, **rthp;
unsigned long samples = 0;
unsigned long sum = 0, sum2 = 0;
unsigned long delta;
@@ -835,15 +853,7 @@ nofree:
* attributes don't unfairly skew
* the length computation
*/
- for (aux = rt_hash_table[i].chain;;) {
- if (aux == rth) {
- length += ONE;
- break;
- }
- if (compare_hash_inputs(&aux->fl, &rth->fl))
- break;
- aux = aux->u.dst.rt_next;
- }
+ length += has_noalias(rt_hash_table[i].chain, rth);
continue;
}
} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
@@ -1073,6 +1083,21 @@ work_done:
out: return 0;
}
+/*
+ * Returns number of entries in a hash chain that have different hash_inputs
+ */
+static int slow_chain_length(const struct rtable *head)
+{
+ int length = 0;
+ const struct rtable *rth = head;
+
+ while (rth) {
+ length += has_noalias(head, rth);
+ rth = rth->u.dst.rt_next;
+ }
+ return length >> FRACT_BITS;
+}
+
static int rt_intern_hash(unsigned hash, struct rtable *rt,
struct rtable **rp, struct sk_buff *skb)
{
@@ -1185,7 +1210,8 @@ restart:
rt_free(cand);
}
} else {
- if (chain_length > rt_chain_length_max) {
+ if (chain_length > rt_chain_length_max &&
+ slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
struct net *net = dev_net(rt->u.dst.dev);
int num = ++net->ipv4.current_rt_cache_rebuild_count;
if (!rt_caching(dev_net(rt->u.dst.dev))) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c3588b4fd979..8d51d39ad1bb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1651,8 +1651,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!sk)
goto no_tcp_socket;
- if (iph->ttl < inet_sk(sk)->min_ttl)
+ if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse;
+ }
process:
if (sk->sk_state == TCP_TIME_WAIT)
@@ -1682,8 +1684,11 @@ process:
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
}
- } else
- sk_add_backlog(sk, skb);
+ } else if (unlikely(sk_add_backlog(sk, skb))) {
+ bh_unlock_sock(sk);
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+ goto discard_and_relse;
+ }
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5dda80..4199bc6915c5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
* in main socket hash table and lock on listening
* socket does not protect us more.
*/
- sk_add_backlog(child, skb);
+ __sk_add_backlog(child, skb);
}
bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4a1605d3f909..f181b78f2385 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2395,13 +2395,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct tcp_extend_values *xvp = tcp_xv(rvp);
struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_cookie_values *cvp = tp->cookie_values;
struct tcphdr *th;
struct sk_buff *skb;
struct tcp_md5sig_key *md5;
int tcp_header_size;
int mss;
+ int s_data_desired = 0;
- skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+ if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
+ s_data_desired = cvp->s_data_desired;
+ skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
if (skb == NULL)
return NULL;
@@ -2457,16 +2461,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
if (OPTION_COOKIE_EXTENSION & opts.options) {
- const struct tcp_cookie_values *cvp = tp->cookie_values;
-
- if (cvp != NULL &&
- cvp->s_data_constant &&
- cvp->s_data_desired > 0) {
- u8 *buf = skb_put(skb, cvp->s_data_desired);
+ if (s_data_desired) {
+ u8 *buf = skb_put(skb, s_data_desired);
/* copy data directly from the listening socket. */
- memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
- TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
+ memcpy(buf, cvp->s_data_payload, s_data_desired);
+ TCP_SKB_CB(skb)->end_seq += s_data_desired;
}
if (opts.hash_size > 0) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 608a5446d05b..7af756d0f931 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1371,8 +1371,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
- else
- sk_add_backlog(sk, skb);
+ else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ goto drop;
+ }
bh_unlock_sock(sk);
return rc;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 67107d63c1cd..e4a1483fba77 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return 0;
}
-static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct rtable *rt = (struct rtable *)xdst->route;
- xdst->u.rt.fl = rt->fl;
+ xdst->u.rt.fl = *fl;
xdst->u.dst.dev = dev;
dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 88fd8c5877ee..6cf3ee14ace3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2615,7 +2615,7 @@ static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
static int addrconf_ifdown(struct net_device *dev, int how)
{
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa, **bifa;
+ struct inet6_ifaddr *ifa, *keep_list, **bifa;
struct net *net = dev_net(dev);
int i;
@@ -2649,11 +2649,11 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&addrconf_hash_lock);
while ((ifa = *bifa) != NULL) {
if (ifa->idev == idev &&
- (how || !(ifa->flags&IFA_F_PERMANENT))) {
+ (how || !(ifa->flags&IFA_F_PERMANENT) ||
+ ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
*bifa = ifa->lst_next;
ifa->lst_next = NULL;
- addrconf_del_timer(ifa);
- in6_ifa_put(ifa);
+ __in6_ifa_put(ifa);
continue;
}
bifa = &ifa->lst_next;
@@ -2689,31 +2689,51 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&idev->lock);
}
#endif
- bifa = &idev->addr_list;
- while ((ifa = *bifa) != NULL) {
- if (how == 0 && (ifa->flags&IFA_F_PERMANENT)) {
- /* Retain permanent address on admin down */
+ keep_list = NULL;
+ bifa = &keep_list;
+ while ((ifa = idev->addr_list) != NULL) {
+ idev->addr_list = ifa->if_next;
+ ifa->if_next = NULL;
+
+ addrconf_del_timer(ifa);
+
+ /* If just doing link down, and address is permanent
+ and not link-local, then retain it. */
+ if (how == 0 &&
+ (ifa->flags&IFA_F_PERMANENT) &&
+ !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
+
+ /* Move to holding list */
+ *bifa = ifa;
bifa = &ifa->if_next;
- /* Restart DAD if needed when link comes back up */
- if ( !((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
- idev->cnf.accept_dad <= 0 ||
- (ifa->flags & IFA_F_NODAD)))
- ifa->flags |= IFA_F_TENTATIVE;
- } else {
- *bifa = ifa->if_next;
- ifa->if_next = NULL;
+ /* If not doing DAD on this address, just keep it. */
+ if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
+ idev->cnf.accept_dad <= 0 ||
+ (ifa->flags & IFA_F_NODAD))
+ continue;
+ /* If it was tentative already, no need to notify */
+ if (ifa->flags & IFA_F_TENTATIVE)
+ continue;
+
+ /* Flag it for later restoration when link comes up */
+ ifa->flags |= IFA_F_TENTATIVE;
+ in6_ifa_hold(ifa);
+ } else {
ifa->dead = 1;
- write_unlock_bh(&idev->lock);
+ }
+ write_unlock_bh(&idev->lock);
- __ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
- in6_ifa_put(ifa);
+ __ipv6_ifa_notify(RTM_DELADDR, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+ in6_ifa_put(ifa);
- write_lock_bh(&idev->lock);
- }
+ write_lock_bh(&idev->lock);
}
+
+ idev->addr_list = keep_list;
+
write_unlock_bh(&idev->lock);
/* Step 5: Discard multicast list */
@@ -2739,28 +2759,29 @@ static int addrconf_ifdown(struct net_device *dev, int how)
static void addrconf_rs_timer(unsigned long data)
{
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
+ struct inet6_dev *idev = ifp->idev;
- if (ifp->idev->cnf.forwarding)
+ read_lock(&idev->lock);
+ if (idev->dead || !(idev->if_flags & IF_READY))
goto out;
- if (ifp->idev->if_flags & IF_RA_RCVD) {
- /*
- * Announcement received after solicitation
- * was sent
- */
+ if (idev->cnf.forwarding)
+ goto out;
+
+ /* Announcement received after solicitation was sent */
+ if (idev->if_flags & IF_RA_RCVD)
goto out;
- }
spin_lock(&ifp->lock);
- if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) {
+ if (ifp->probes++ < idev->cnf.rtr_solicits) {
/* The wait after the last probe can be shorter */
addrconf_mod_timer(ifp, AC_RS,
- (ifp->probes == ifp->idev->cnf.rtr_solicits) ?
- ifp->idev->cnf.rtr_solicit_delay :
- ifp->idev->cnf.rtr_solicit_interval);
+ (ifp->probes == idev->cnf.rtr_solicits) ?
+ idev->cnf.rtr_solicit_delay :
+ idev->cnf.rtr_solicit_interval);
spin_unlock(&ifp->lock);
- ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
+ ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
} else {
spin_unlock(&ifp->lock);
/*
@@ -2768,10 +2789,11 @@ static void addrconf_rs_timer(unsigned long data)
* assumption any longer.
*/
printk(KERN_DEBUG "%s: no IPv6 routers present\n",
- ifp->idev->dev->name);
+ idev->dev->name);
}
out:
+ read_unlock(&idev->lock);
in6_ifa_put(ifp);
}
@@ -2850,9 +2872,9 @@ static void addrconf_dad_timer(unsigned long data)
struct inet6_dev *idev = ifp->idev;
struct in6_addr mcaddr;
- read_lock_bh(&idev->lock);
- if (idev->dead) {
- read_unlock_bh(&idev->lock);
+ read_lock(&idev->lock);
+ if (idev->dead || !(idev->if_flags & IF_READY)) {
+ read_unlock(&idev->lock);
goto out;
}
@@ -2864,7 +2886,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
- read_unlock_bh(&idev->lock);
+ read_unlock(&idev->lock);
addrconf_dad_completed(ifp);
@@ -2874,7 +2896,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->probes--;
addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
spin_unlock(&ifp->lock);
- read_unlock_bh(&idev->lock);
+ read_unlock(&idev->lock);
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 551882b9dfd6..5e463c43fcc2 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -84,18 +84,11 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
if ((rule->flags & FIB_RULE_FIND_SADDR) &&
r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
struct in6_addr saddr;
- unsigned int srcprefs = 0;
-
- if (flags & RT6_LOOKUP_F_SRCPREF_TMP)
- srcprefs |= IPV6_PREFER_SRC_TMP;
- if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC)
- srcprefs |= IPV6_PREFER_SRC_PUBLIC;
- if (flags & RT6_LOOKUP_F_SRCPREF_COA)
- srcprefs |= IPV6_PREFER_SRC_COA;
if (ipv6_dev_get_saddr(net,
ip6_dst_idev(&rt->u.dst)->dev,
- &flp->fl6_dst, srcprefs,
+ &flp->fl6_dst,
+ rt6_flags2srcprefs(flags),
&saddr))
goto again;
if (!ipv6_prefix_equal(&saddr, &r->src.addr,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b08879e97f22..52cd3eff31dc 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -819,15 +819,8 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
if (!ipv6_addr_any(&fl->fl6_src))
flags |= RT6_LOOKUP_F_HAS_SADDR;
- else if (sk) {
- unsigned int prefs = inet6_sk(sk)->srcprefs;
- if (prefs & IPV6_PREFER_SRC_TMP)
- flags |= RT6_LOOKUP_F_SRCPREF_TMP;
- if (prefs & IPV6_PREFER_SRC_PUBLIC)
- flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
- if (prefs & IPV6_PREFER_SRC_COA)
- flags |= RT6_LOOKUP_F_SRCPREF_COA;
- }
+ else if (sk)
+ flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6963a6b6763e..9b6dbba80d31 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1740,8 +1740,11 @@ process:
if (!tcp_prequeue(sk, skb))
ret = tcp_v6_do_rcv(sk, skb);
}
- } else
- sk_add_backlog(sk, skb);
+ } else if (unlikely(sk_add_backlog(sk, skb))) {
+ bh_unlock_sock(sk);
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+ goto discard_and_relse;
+ }
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 52b8347ae3b2..3c0c9c755c92 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -583,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb1);
- else
- sk_add_backlog(sk, skb1);
+ else if (sk_add_backlog(sk, skb1)) {
+ kfree_skb(skb1);
+ bh_unlock_sock(sk);
+ goto drop;
+ }
bh_unlock_sock(sk);
- } else {
- atomic_inc(&sk->sk_drops);
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_INERRORS, IS_UDPLITE(sk));
+ continue;
}
+drop:
+ atomic_inc(&sk->sk_drops);
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_INERRORS, IS_UDPLITE(sk));
}
}
/*
@@ -754,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
- else
- sk_add_backlog(sk, skb);
+ else if (sk_add_backlog(sk, skb)) {
+ atomic_inc(&sk->sk_drops);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ goto discard;
+ }
bh_unlock_sock(sk);
sock_put(sk);
return 0;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index dbdc696f5fc5..ae181651c75a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return 0;
}
-static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct rt6_info *rt = (struct rt6_info*)xdst->route;
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780512e8..86d6985b9d49 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
llc_conn_state_process(sk, skb);
else {
llc_set_backlog_type(skb, LLC_EVENT);
- sk_add_backlog(sk, skb);
+ __sk_add_backlog(sk, skb);
}
}
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index a8dde9b010da..a12144da7974 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -827,7 +827,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
else {
dprintk("%s: adding to backlog...\n", __func__);
llc_set_backlog_type(skb, LLC_PACKET);
- sk_add_backlog(sk, skb);
+ if (sk_add_backlog(sk, skb))
+ goto drop_unlock;
}
out:
bh_unlock_sock(sk);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index ee61a9f6fabc..623e6644b80c 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -48,20 +48,24 @@ static ssize_t ieee80211_if_write(
ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
{
u8 *buf;
- ssize_t ret = -ENODEV;
+ ssize_t ret;
- buf = kzalloc(count, GFP_KERNEL);
+ buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+ ret = -EFAULT;
if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
+ goto freebuf;
+ ret = -ENODEV;
rtnl_lock();
if (sdata->dev->reg_state == NETREG_REGISTERED)
ret = (*write)(sdata, buf, count);
rtnl_unlock();
+freebuf:
+ kfree(buf);
return ret;
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 06c33b68d8e5..b887e484ae04 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
sdata->vif.bss_conf.enable_beacon =
- !!rcu_dereference(sdata->u.ap.beacon);
+ !!sdata->u.ap.beacon;
break;
case NL80211_IFTYPE_ADHOC:
sdata->vif.bss_conf.enable_beacon =
- !!rcu_dereference(sdata->u.ibss.presp);
+ !!sdata->u.ibss.presp;
break;
case NL80211_IFTYPE_MESH_POINT:
sdata->vif.bss_conf.enable_beacon = true;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 61080c5fad50..7a6bebce7f2f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -749,9 +749,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_ACTION:
- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
- return RX_DROP_MONITOR;
- /* fall through */
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
skb_queue_tail(&ifmsh->skb_queue, skb);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index ce84237ebad3..ccff6133e19a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (SN_GT(mpath->sn, orig_sn) ||
(mpath->sn == orig_sn &&
action == MPATH_PREQ &&
- new_metric > mpath->metric)) {
+ new_metric >= mpath->metric)) {
process = false;
fresh_info = false;
}
@@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
cpu_to_le32(orig_sn), 0, target_addr,
- cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
+ cpu_to_le32(target_sn), next_hop, hopcount,
ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
rcu_read_unlock();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4c189d0be4a3..461167dfa42c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -447,10 +447,12 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
ieee80211_send_nullfunc(local, sdata, 1);
- if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
- conf->flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- }
+ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+ return;
+
+ conf->flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
}
@@ -569,7 +571,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
(!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
ieee80211_send_nullfunc(local, sdata, 1);
- if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
+ if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+ (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
local->hw.conf.flags |= IEEE80211_CONF_PS;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a33f865807f9..c0ad7e879a6e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1994,6 +1994,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
break;
+ case MESH_PLINK_CATEGORY:
+ case MESH_PATH_SEL_CATEGORY:
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+ break;
}
/*
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 5bf044b92dca..4de987cbda1c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
while (sta) {
if (sta->sdata == sdata &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
- sta = rcu_dereference(sta->hnext);
+ sta = rcu_dereference_check(sta->hnext,
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
}
return sta;
}
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
while (sta) {
if ((sta->sdata == sdata ||
sta->sdata->bss == sdata->bss) &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
- sta = rcu_dereference(sta->hnext);
+ sta = rcu_dereference_check(sta->hnext,
+ rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
}
return sta;
}
@@ -431,6 +443,7 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
/* check if STA exists already */
if (sta_info_get_bss(sdata, sta->sta.addr)) {
spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
rcu_read_lock();
err = -EEXIST;
goto out_free;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f7209d691c35..2cb77267f733 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1992,6 +1992,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
void ieee80211_tx_pending(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *)data;
+ struct ieee80211_sub_if_data *sdata;
unsigned long flags;
int i;
bool txok;
@@ -2028,6 +2029,11 @@ void ieee80211_tx_pending(unsigned long data)
if (!txok)
break;
}
+
+ if (skb_queue_empty(&local->pending[i]))
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(
+ netdev_get_tx_queue(sdata->dev, i));
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 7614821caed5..ad9009f717ed 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -279,13 +279,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
/* someone still has this queue stopped */
return;
- if (!skb_queue_empty(&local->pending[queue]))
+ if (skb_queue_empty(&local->pending[queue])) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+ rcu_read_unlock();
+ } else
tasklet_schedule(&local->tx_pending_tasklet);
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
- rcu_read_unlock();
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -1102,9 +1102,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
*/
res = drv_start(local);
if (res) {
- WARN(local->suspended, "Harware became unavailable "
- "upon resume. This is could be a software issue"
- "prior to suspend or a hardware issue\n");
+ WARN(local->suspended, "Hardware became unavailable "
+ "upon resume. This could be a software issue "
+ "prior to suspend or a hardware issue.\n");
return res;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 031a5e6fb4aa..1612d417d10c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1688,6 +1688,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
{
switch (i->type) {
case PACKET_MR_MULTICAST:
+ if (i->alen != dev->addr_len)
+ return -EINVAL;
if (what > 0)
return dev_mc_add(dev, i->addr, i->alen, 0);
else
@@ -1700,6 +1702,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
return dev_set_allmulti(dev, what);
break;
case PACKET_MR_UNICAST:
+ if (i->alen != dev->addr_len)
+ return -EINVAL;
if (what > 0)
return dev_unicast_add(dev, i->addr);
else
@@ -1734,7 +1738,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
goto done;
err = -EINVAL;
- if (mreq->mr_alen != dev->addr_len)
+ if (mreq->mr_alen > dev->addr_len)
goto done;
err = -ENOBUFS;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e67add..3d74b264ea22 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
const union sctp_addr *peer,
struct sctp_transport **pt);
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
/* Calculate the SCTP checksum of an SCTP packet. */
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
}
if (sock_owned_by_user(sk)) {
+ if (sctp_add_backlog(sk, skb)) {
+ sctp_bh_unlock_sock(sk);
+ sctp_chunk_free(chunk);
+ skb = NULL; /* sctp_chunk_free already freed the skb */
+ goto discard_release;
+ }
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
- sctp_add_backlog(sk, skb);
} else {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk_add_backlog(sk, skb);
- backloged = 1;
+ if (sk_add_backlog(sk, skb))
+ sctp_chunk_free(chunk);
+ else
+ backloged = 1;
} else
sctp_inq_push(inqueue, chunk);
@@ -362,22 +369,27 @@ done:
return 0;
}
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
struct sctp_ep_common *rcvr = chunk->rcvr;
+ int ret;
- /* Hold the assoc/ep while hanging on the backlog queue.
- * This way, we know structures we need will not disappear from us
- */
- if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- sctp_association_hold(sctp_assoc(rcvr));
- else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
- sctp_endpoint_hold(sctp_ep(rcvr));
- else
- BUG();
+ ret = sk_add_backlog(sk, skb);
+ if (!ret) {
+ /* Hold the assoc/ep while hanging on the backlog queue.
+ * This way, we know structures we need will not disappear
+ * from us
+ */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_hold(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_hold(sctp_ep(rcvr));
+ else
+ BUG();
+ }
+ return ret;
- sk_add_backlog(sk, skb);
}
/* Handle icmp frag needed error. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f6d1e59c4151..dfc5c127efd4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
SCTP_DBG_OBJCNT_INC(sock);
percpu_counter_inc(&sctp_sockets_allocated);
+ /* Set socket backlog limit. */
+ sk->sk_backlog.limit = sysctl_sctp_rmem[1];
+
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 7018eef1dcdd..f96c2fe6137b 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -160,16 +160,15 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt)
(void)rpc_ntop(sap, buf, sizeof(buf));
xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
- (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
- (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
- NIPQUAD(sin->sin_addr.s_addr));
+ snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
- (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
/* netid */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3d739e5d15d8..4f55ab7ec1b1 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -297,12 +297,11 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
switch (sap->sa_family) {
case AF_INET:
sin = xs_addr_in(xprt);
- (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
- NIPQUAD(sin->sin_addr.s_addr));
+ snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
break;
case AF_INET6:
sin6 = xs_addr_in6(xprt);
- (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
+ snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
break;
default:
BUG();
@@ -315,10 +314,10 @@ static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
struct sockaddr *sap = xs_addr(xprt);
char buf[128];
- (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
- (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
+ snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 327011fcc407..78091375ca12 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@
#define MAX_ADDR_STR 32
-static struct media *media_list = NULL;
+static struct media media_list[MAX_MEDIA];
static u32 media_count = 0;
-struct bearer *tipc_bearers = NULL;
+struct bearer tipc_bearers[MAX_BEARERS];
/**
* media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
int res = -EINVAL;
write_lock_bh(&tipc_net_lock);
- if (!media_list)
- goto exit;
+ if (tipc_mode != TIPC_NET_MODE) {
+ warn("Media <%s> rejected, not in networked mode yet\n", name);
+ goto exit;
+ }
if (!media_name_valid(name)) {
warn("Media <%s> rejected, illegal name\n", name);
goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
-int tipc_bearer_init(void)
-{
- int res;
-
- write_lock_bh(&tipc_net_lock);
- tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
- media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
- if (tipc_bearers && media_list) {
- res = 0;
- } else {
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
- res = -ENOMEM;
- }
- write_unlock_bh(&tipc_net_lock);
- return res;
-}
-
void tipc_bearer_stop(void)
{
u32 i;
- if (!tipc_bearers)
- return;
-
for (i = 0; i < MAX_BEARERS; i++) {
if (tipc_bearers[i].active)
tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
if (tipc_bearers[i].active)
bearer_disable(tipc_bearers[i].publ.name);
}
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
media_count = 0;
}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ca5734892713..000228e93f9e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {
struct link;
-extern struct bearer *tipc_bearers;
+extern struct bearer tipc_bearers[];
void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6f50f6423f63..1a7e4665af80 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1882,6 +1882,15 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
(msg_destnode(msg) != tipc_own_addr)))
goto cont;
+ /* Discard non-routeable messages destined for another node */
+
+ if (unlikely(!msg_isdata(msg) &&
+ (msg_destnode(msg) != tipc_own_addr))) {
+ if ((msg_user(msg) != CONN_MANAGER) &&
+ (msg_user(msg) != MSG_FRAGMENTER))
+ goto cont;
+ }
+
/* Locate unicast link endpoint that should handle message */
n_ptr = tipc_node_find(msg_prevnode(msg));
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7906608bf510..f25b1cdb64eb 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
*/
DEFINE_RWLOCK(tipc_net_lock);
-struct network tipc_net = { NULL };
+struct _zone *tipc_zones[256] = { NULL, };
+struct network tipc_net = { tipc_zones };
struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
}
}
-static int net_init(void)
-{
- memset(&tipc_net, 0, sizeof(tipc_net));
- tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
- if (!tipc_net.zones) {
- return -ENOMEM;
- }
- return 0;
-}
-
static void net_stop(void)
{
u32 z_num;
- if (!tipc_net.zones)
- return;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+ for (z_num = 1; z_num <= tipc_max_zones; z_num++)
tipc_zone_delete(tipc_net.zones[z_num]);
- }
- kfree(tipc_net.zones);
- tipc_net.zones = NULL;
}
static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
tipc_named_reinit();
tipc_port_reinit();
- if ((res = tipc_bearer_init()) ||
- (res = net_init()) ||
- (res = tipc_cltr_init()) ||
+ if ((res = tipc_cltr_init()) ||
(res = tipc_bclink_init())) {
return res;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1ea64f09cc45..4b235fc1c70f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
if (!sock_owned_by_user(sk)) {
res = filter_rcv(sk, buf);
} else {
- sk_add_backlog(sk, buf);
- res = TIPC_OK;
+ if (sk_add_backlog(sk, buf))
+ res = TIPC_ERR_OVERLOAD;
+ else
+ res = TIPC_OK;
}
bh_unlock_sock(sk);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ac91f0dfa144..ff123e56114a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -76,19 +76,6 @@ struct top_srv {
static struct top_srv topsrv = { 0 };
/**
- * htohl - convert value to endianness used by destination
- * @in: value to convert
- * @swap: non-zero if endianness must be reversed
- *
- * Returns converted value
- */
-
-static u32 htohl(u32 in, int swap)
-{
- return swap ? swab32(in) : in;
-}
-
-/**
* subscr_send_event - send a message containing a tipc_event to the subscriber
*
* Note: Must not hold subscriber's server port lock, since tipc_send() will
@@ -107,11 +94,11 @@ static void subscr_send_event(struct subscription *sub,
msg_sect.iov_base = (void *)&sub->evt;
msg_sect.iov_len = sizeof(struct tipc_event);
- sub->evt.event = htohl(event, sub->swap);
- sub->evt.found_lower = htohl(found_lower, sub->swap);
- sub->evt.found_upper = htohl(found_upper, sub->swap);
- sub->evt.port.ref = htohl(port_ref, sub->swap);
- sub->evt.port.node = htohl(node, sub->swap);
+ sub->evt.event = htonl(event);
+ sub->evt.found_lower = htonl(found_lower);
+ sub->evt.found_upper = htonl(found_upper);
+ sub->evt.port.ref = htonl(port_ref);
+ sub->evt.port.node = htonl(node);
tipc_send(sub->server_ref, 1, &msg_sect);
}
@@ -287,16 +274,23 @@ static void subscr_cancel(struct tipc_subscr *s,
{
struct subscription *sub;
struct subscription *sub_temp;
+ __u32 type, lower, upper;
int found = 0;
/* Find first matching subscription, exit if not found */
+ type = ntohl(s->seq.type);
+ lower = ntohl(s->seq.lower);
+ upper = ntohl(s->seq.upper);
+
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
- if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
- found = 1;
- break;
- }
+ if ((type == sub->seq.type) &&
+ (lower == sub->seq.lower) &&
+ (upper == sub->seq.upper)) {
+ found = 1;
+ break;
+ }
}
if (!found)
return;
@@ -325,16 +319,10 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
struct subscriber *subscriber)
{
struct subscription *sub;
- int swap;
-
- /* Determine subscriber's endianness */
-
- swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
/* Detect & process a subscription cancellation request */
- if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
- s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
+ if (ntohl(s->filter) & TIPC_SUB_CANCEL) {
subscr_cancel(s, subscriber);
return NULL;
}
@@ -359,11 +347,11 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
/* Initialize subscription object */
- sub->seq.type = htohl(s->seq.type, swap);
- sub->seq.lower = htohl(s->seq.lower, swap);
- sub->seq.upper = htohl(s->seq.upper, swap);
- sub->timeout = htohl(s->timeout, swap);
- sub->filter = htohl(s->filter, swap);
+ sub->seq.type = ntohl(s->seq.type);
+ sub->seq.lower = ntohl(s->seq.lower);
+ sub->seq.upper = ntohl(s->seq.upper);
+ sub->timeout = ntohl(s->timeout);
+ sub->filter = ntohl(s->filter);
if ((!(sub->filter & TIPC_SUB_PORTS) ==
!(sub->filter & TIPC_SUB_SERVICE)) ||
(sub->seq.lower > sub->seq.upper)) {
@@ -376,7 +364,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
sub->server_ref = subscriber->port_ref;
- sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
atomic_inc(&topsrv.subscription_count);
if (sub->timeout != TIPC_WAIT_FOREVER) {
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf4d202..c20f496d95b2 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -53,7 +53,6 @@ typedef void (*tipc_subscr_event) (struct subscription *sub,
* @nameseq_list: adjacent subscriptions in name sequence's subscription list
* @subscription_list: adjacent subscriptions in subscriber's subscription list
* @server_ref: object reference of server port associated with subscription
- * @swap: indicates if subscriber uses opposite endianness in its messages
* @evt: template for events generated by subscription
*/
@@ -66,7 +65,6 @@ struct subscription {
struct list_head nameseq_list;
struct list_head subscription_list;
u32 server_ref;
- int swap;
struct tipc_event evt;
};
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index e857d72c7e8c..496348c48506 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -324,7 +324,7 @@ struct reg_regdb_search_request {
};
static LIST_HEAD(reg_regdb_search_list);
-static DEFINE_SPINLOCK(reg_regdb_search_lock);
+static DEFINE_MUTEX(reg_regdb_search_mutex);
static void reg_regdb_search(struct work_struct *work)
{
@@ -332,7 +332,7 @@ static void reg_regdb_search(struct work_struct *work)
const struct ieee80211_regdomain *curdom, *regdom;
int i, r;
- spin_lock(&reg_regdb_search_lock);
+ mutex_lock(&reg_regdb_search_mutex);
while (!list_empty(&reg_regdb_search_list)) {
request = list_first_entry(&reg_regdb_search_list,
struct reg_regdb_search_request,
@@ -346,18 +346,16 @@ static void reg_regdb_search(struct work_struct *work)
r = reg_copy_regd(&regdom, curdom);
if (r)
break;
- spin_unlock(&reg_regdb_search_lock);
mutex_lock(&cfg80211_mutex);
set_regdom(regdom);
mutex_unlock(&cfg80211_mutex);
- spin_lock(&reg_regdb_search_lock);
break;
}
}
kfree(request);
}
- spin_unlock(&reg_regdb_search_lock);
+ mutex_unlock(&reg_regdb_search_mutex);
}
static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
@@ -375,9 +373,9 @@ static void reg_regdb_query(const char *alpha2)
memcpy(request->alpha2, alpha2, 2);
- spin_lock(&reg_regdb_search_lock);
+ mutex_lock(&reg_regdb_search_mutex);
list_add_tail(&request->list, &reg_regdb_search_list);
- spin_unlock(&reg_regdb_search_lock);
+ mutex_unlock(&reg_regdb_search_mutex);
schedule_work(&reg_regdb_work);
}
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 3e1efe534645..52e304212241 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
- sk_add_backlog(sk, skb);
+ queued = !sk_add_backlog(sk, skb);
}
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 34a5ef8316e7..843e066649cb 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1372,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return err;
}
-static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct xfrm_policy_afinfo *afinfo =
xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1381,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
if (!afinfo)
return -EINVAL;
- err = afinfo->fill_dst(xdst, dev);
+ err = afinfo->fill_dst(xdst, dev, fl);
xfrm_policy_put_afinfo(afinfo);
@@ -1486,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
- err = xfrm_fill_dst(xdst, dev);
+ err = xfrm_fill_dst(xdst, dev, fl);
if (err)
goto free_dst;