Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_virtio.c                  2
-rw-r--r--  net/bridge/br_device.c                 2
-rw-r--r--  net/bridge/br_input.c                  2
-rw-r--r--  net/bridge/br_mdb.c                    4
-rw-r--r--  net/bridge/br_multicast.c              3
-rw-r--r--  net/bridge/br_private.h                4
-rw-r--r--  net/caif/caif_dev.c                    2
-rw-r--r--  net/caif/caif_usb.c                    4
-rw-r--r--  net/ceph/osdmap.c                     42
-rw-r--r--  net/core/dev.c                         5
-rw-r--r--  net/core/rtnetlink.c                   1
-rw-r--r--  net/dcb/dcbnl.c                        8
-rw-r--r--  net/ieee802154/6lowpan.h               2
-rw-r--r--  net/ipv4/inet_connection_sock.c        1
-rw-r--r--  net/ipv4/ip_input.c                    6
-rw-r--r--  net/ipv4/ip_options.c                  2
-rw-r--r--  net/ipv4/tcp_input.c                   6
-rw-r--r--  net/ipv6/ip6_input.c                   9
-rw-r--r--  net/ipv6/route.c                       3
-rw-r--r--  net/irda/ircomm/ircomm_tty.c          29
-rw-r--r--  net/irda/iriap.c                       7
-rw-r--r--  net/key/af_key.c                       8
-rw-r--r--  net/l2tp/l2tp_ppp.c                    1
-rw-r--r--  net/mac80211/cfg.c                    11
-rw-r--r--  net/mac80211/iface.c                   8
-rw-r--r--  net/mac80211/mlme.c                   28
-rw-r--r--  net/mac80211/tx.c                     80
-rw-r--r--  net/netfilter/ipset/ip_set_core.c      3
-rw-r--r--  net/netfilter/nf_conntrack_helper.c   11
-rw-r--r--  net/netfilter/nfnetlink.c              7
-rw-r--r--  net/netfilter/xt_AUDIT.c               3
-rw-r--r--  net/netlabel/netlabel_unlabeled.c     27
-rw-r--r--  net/rds/message.c                      8
-rw-r--r--  net/rds/stats.c                        1
-rw-r--r--  net/sched/sch_qfq.c                   66
-rw-r--r--  net/sctp/endpointola.c                 2
-rw-r--r--  net/sctp/socket.c                      6
-rw-r--r--  net/sctp/ssnmap.c                      8
-rw-r--r--  net/sctp/tsnmap.c                     13
-rw-r--r--  net/sctp/ulpqueue.c                   87
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c     12
-rw-r--r--  net/sunrpc/rpc_pipe.c                  5
-rw-r--r--  net/sunrpc/xprtsock.c                 15
-rw-r--r--  net/wireless/core.c                    3
-rw-r--r--  net/wireless/nl80211.c               110
45 files changed, 401 insertions, 266 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 74dea377fe5b..de2e950a0a7a 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
.create = p9_virtio_create,
.close = p9_virtio_close,
.request = p9_virtio_request,
- //.zc_request = p9_virtio_zc_request,
+ .zc_request = p9_virtio_zc_request,
.cancel = p9_virtio_cancel,
/*
* We leave one entry for input and one entry for response
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d5f1d3fd4b28..314c73ed418f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- mdst = br_mdb_get(br, skb);
+ mdst = br_mdb_get(br, skb, vid);
if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
br_multicast_deliver(mdst, skb);
else
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 480330151898..828e2bcc1f52 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
if (is_broadcast_ether_addr(dest))
skb2 = skb;
else if (is_multicast_ether_addr(dest)) {
- mdst = br_mdb_get(br, skb);
+ mdst = br_mdb_get(br, skb, vid);
if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
if ((mdst && mdst->mglist) ||
br_multicast_is_router(br))
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 9f97b850fc65..ee79f3f20383 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
port = p->port;
if (port) {
struct br_mdb_entry e;
+ memset(&e, 0, sizeof(e));
e.ifindex = port->dev->ifindex;
e.state = p->state;
if (p->addr.proto == htons(ETH_P_IP))
@@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
break;
bpm = nlmsg_data(nlh);
+ memset(bpm, 0, sizeof(*bpm));
bpm->ifindex = dev->ifindex;
if (br_mdb_fill_info(skb, cb, dev) < 0)
goto out;
@@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
+ memset(bpm, 0, sizeof(*bpm));
bpm->family = AF_BRIDGE;
bpm->ifindex = dev->ifindex;
nest = nla_nest_start(skb, MDBA_MDB);
@@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
{
struct br_mdb_entry entry;
+ memset(&entry, 0, sizeof(entry));
entry.ifindex = port->dev->ifindex;
entry.addr.proto = group->proto;
entry.addr.u.ip4 = group->u.ip4;
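
The memset() calls added above (and the similar ones in the dcbnl, rtnetlink, and dcbnl_getperm_hwaddr hunks further down) all fix the same class of bug: a struct is built on the kernel stack field by field and then copied wholesale to userspace, so compiler-inserted padding and any unwritten fields leak stack contents. A minimal userspace sketch of why the zeroing is needed; demo_entry is a made-up stand-in, not the kernel's br_mdb_entry:

    #include <stdio.h>
    #include <string.h>

    struct demo_entry {
        char state;     /* 1 byte; the compiler pads 3 bytes before ifindex */
        int  ifindex;
    };

    static void dump_bytes(const void *p, size_t n)
    {
        const unsigned char *b = p;
        while (n--)
            printf("%02x ", *b++);
        putchar('\n');
    }

    int main(void)
    {
        struct demo_entry e;

        /* Field-by-field init leaves the padding bytes undefined; in the
         * kernel those bytes would be copied to userspace by nla_put(). */
        e.state = 1;
        e.ifindex = 2;
        dump_bytes(&e, sizeof(e));

        /* Zeroing the whole struct first makes every copied byte defined. */
        memset(&e, 0, sizeof(e));
        e.state = 1;
        e.ifindex = 2;
        dump_bytes(&e, sizeof(e));
        return 0;
    }
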
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 10e6fce1bb62..923fbeaf7afd 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
#endif
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
- struct sk_buff *skb)
+ struct sk_buff *skb, u16 vid)
{
struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
struct br_ip ip;
@@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
return NULL;
ip.proto = skb->protocol;
+ ip.vid = vid;
switch (skb->protocol) {
case htons(ETH_P_IP):
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 6d314c4e6bcb..3cbf5beb3d4b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -442,7 +442,7 @@ extern int br_multicast_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb);
extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
- struct sk_buff *skb);
+ struct sk_buff *skb, u16 vid);
extern void br_multicast_add_port(struct net_bridge_port *port);
extern void br_multicast_del_port(struct net_bridge_port *port);
extern void br_multicast_enable_port(struct net_bridge_port *port);
@@ -504,7 +504,7 @@ static inline int br_multicast_rcv(struct net_bridge *br,
}
static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
- struct sk_buff *skb)
+ struct sk_buff *skb, u16 vid)
{
return NULL;
}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 1ae1d9cb278d..21760f008974 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
return NULL;
}
-void caif_flow_cb(struct sk_buff *skb)
+static void caif_flow_cb(struct sk_buff *skb)
{
struct caif_device_entry *caifd;
void (*dtor)(struct sk_buff *skb) = NULL;
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 3ebc8cbc91ff..ef8ebaa993cf 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
layr->up->ctrlcmd(layr->up, ctrl, layr->id);
}
-struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
- u8 braddr[ETH_ALEN])
+static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+ u8 braddr[ETH_ALEN])
{
struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 69bc4bf89e3e..4543b9aba40c 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
return 0;
}
+static int __decode_pgid(void **p, void *end, struct ceph_pg *pg)
+{
+ u8 v;
+
+ ceph_decode_need(p, end, 1+8+4+4, bad);
+ v = ceph_decode_8(p);
+ if (v != 1)
+ goto bad;
+ pg->pool = ceph_decode_64(p);
+ pg->seed = ceph_decode_32(p);
+ *p += 4; /* skip preferred */
+ return 0;
+
+bad:
+ dout("error decoding pgid\n");
+ return -EINVAL;
+}
+
/*
* decode a full map.
*/
@@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
for (i = 0; i < len; i++) {
int n, j;
struct ceph_pg pgid;
- struct ceph_pg_v1 pgid_v1;
struct ceph_pg_mapping *pg;
- ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
- ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
- pgid.pool = le32_to_cpu(pgid_v1.pool);
- pgid.seed = le16_to_cpu(pgid_v1.ps);
+ err = __decode_pgid(p, end, &pgid);
+ if (err)
+ goto bad;
+ ceph_decode_need(p, end, sizeof(u32), bad);
n = ceph_decode_32(p);
err = -EINVAL;
if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
@@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
u16 version;
ceph_decode_16_safe(p, end, version, bad);
- if (version > 6) {
- pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6);
+ if (version != 6) {
+ pr_warning("got unknown v %d != 6 of inc osdmap\n", version);
goto bad;
}
@@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
while (len--) {
struct ceph_pg_mapping *pg;
int j;
- struct ceph_pg_v1 pgid_v1;
struct ceph_pg pgid;
u32 pglen;
- ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
- ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
- pgid.pool = le32_to_cpu(pgid_v1.pool);
- pgid.seed = le16_to_cpu(pgid_v1.ps);
- pglen = ceph_decode_32(p);
+ err = __decode_pgid(p, end, &pgid);
+ if (err)
+ goto bad;
+ ceph_decode_need(p, end, sizeof(u32), bad);
+ pglen = ceph_decode_32(p);
if (pglen) {
ceph_decode_need(p, end, pglen*sizeof(u32), bad);
diff --git a/net/core/dev.c b/net/core/dev.c
index a06a7a58dd11..dffbef70cd31 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3444,6 +3444,7 @@ ncls:
}
switch (rx_handler(&skb)) {
case RX_HANDLER_CONSUMED:
+ ret = NET_RX_SUCCESS;
goto unlock;
case RX_HANDLER_ANOTHER:
goto another_round;
@@ -4103,7 +4104,7 @@ static void net_rx_action(struct softirq_action *h)
* Allow this to run for 2 jiffies since which will allow
* an average latency of 1.5/HZ.
*/
- if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
+ if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
goto softnet_break;
local_irq_enable();
@@ -4780,7 +4781,7 @@ EXPORT_SYMBOL(dev_set_mac_address);
/**
* dev_change_carrier - Change device carrier
* @dev: device
- * @new_carries: new value
+ * @new_carrier: new value
*
* Change device carrier
*/
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b376410ff259..a585d45cc9d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -979,6 +979,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
* report anything.
*/
ivi.spoofchk = -1;
+ memset(ivi.mac, 0, sizeof(ivi.mac));
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
vf_mac.vf =
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 1b588e23cf80..21291f1abcd6 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
if (!netdev->dcbnl_ops->getpermhwaddr)
return -EOPNOTSUPP;
+ memset(perm_addr, 0, sizeof(perm_addr));
netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
@@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->ieee_getets) {
struct ieee_ets ets;
+ memset(&ets, 0, sizeof(ets));
err = ops->ieee_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
@@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->ieee_getmaxrate) {
struct ieee_maxrate maxrate;
+ memset(&maxrate, 0, sizeof(maxrate));
err = ops->ieee_getmaxrate(netdev, &maxrate);
if (!err) {
err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
@@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->ieee_getpfc) {
struct ieee_pfc pfc;
+ memset(&pfc, 0, sizeof(pfc));
err = ops->ieee_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
@@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
/* get peer info if available */
if (ops->ieee_peer_getets) {
struct ieee_ets ets;
+ memset(&ets, 0, sizeof(ets));
err = ops->ieee_peer_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
@@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->ieee_peer_getpfc) {
struct ieee_pfc pfc;
+ memset(&pfc, 0, sizeof(pfc));
err = ops->ieee_peer_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
@@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
/* peer info if available */
if (ops->cee_peer_getpg) {
struct cee_pg pg;
+ memset(&pg, 0, sizeof(pg));
err = ops->cee_peer_getpg(netdev, &pg);
if (!err &&
nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
@@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
if (ops->cee_peer_getpfc) {
struct cee_pfc pfc;
+ memset(&pfc, 0, sizeof(pfc));
err = ops->cee_peer_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 8c2251fb0a3f..bba5f8336317 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -84,7 +84,7 @@
(memcmp(addr1, addr2, length >> 3) == 0)
/* local link, i.e. FE80::/10 */
-#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
+#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
/*
* check whether we can compress the IID to 16 bits,
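
The 6lowpan hunk above is a byte-order fix: s6_addr16[0] holds the first address group in network byte order, so the comparison constant must go through htons() rather than being hand-swapped to 0x80FE, which only matched on little-endian machines. A small standalone illustration in plain POSIX C rather than kernel code:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* First 16-bit group of fe80::1 as it sits in memory
         * (network byte order, i.e. big-endian on the wire). */
        const uint8_t raw[2] = { 0xfe, 0x80 };
        uint16_t group;

        memcpy(&group, raw, sizeof(group));

        /* Correct: convert the host-order constant to network order. */
        printf("group == htons(0xFE80): %d\n", group == htons(0xFE80));

        /* The old byte-swapped literal only matched on little-endian
         * hosts, where the in-memory bytes read back as 0x80FE. */
        printf("group == 0x80FE:        %d\n", group == 0x80FE);
        return 0;
    }
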
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7d1874be1df3..786d97aee751 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock);
* tcp/dccp_create_openreq_child().
*/
void inet_csk_prepare_forced_close(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
{
/* sk_clone_lock locked the socket and set refcnt to 2 */
bh_unlock_sock(sk);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 87abd3e2bd32..2bdf802e28e2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
- } else
+ kfree_skb(skb);
+ } else {
IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
- kfree_skb(skb);
+ consume_skb(skb);
+ }
}
}
out:
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index f6289bf6f332..310a3647c83d 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -423,7 +423,7 @@ int ip_options_compile(struct net *net,
put_unaligned_be32(midtime, timeptr);
opt->is_changed = 1;
}
- } else {
+ } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
unsigned int overflow = optptr[3]>>4;
if (overflow == 15) {
pp_ptr = optptr + 3;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a759e19496d2..0d9bdacce99f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (tcp_checksum_complete_user(sk, skb))
goto csum_error;
+ if ((int)skb->truesize > sk->sk_forward_alloc)
+ goto step5;
+
/* Predicted packet is in window by definition.
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_rcv_rtt_measure_ts(sk, skb);
- if ((int)skb->truesize > sk->sk_forward_alloc)
- goto step5;
-
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b10414e619e..e33fe0ab2568 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -241,9 +241,11 @@ resubmit:
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_UNK_NEXTHDR, nhoff);
}
- } else
+ kfree_skb(skb);
+ } else {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
- kfree_skb(skb);
+ consume_skb(skb);
+ }
}
rcu_read_unlock();
return 0;
@@ -279,7 +281,8 @@ int ip6_mc_input(struct sk_buff *skb)
* IPv6 multicast router mode is now supported ;)
*/
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
- !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
+ !(ipv6_addr_type(&hdr->daddr) &
+ (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/*
* Okay, we try to forward - split and duplicate
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 928266569689..e5fe0041adfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
restart:
read_lock_bh(&table->tb6_lock);
for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
- if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
+ if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+ (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
ip6_del_rt(rt);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 9a5fd3c3e530..362ba47968e4 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
struct tty_port *port = &self->port;
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ int do_clocal = 0;
unsigned long flags;
IRDA_DEBUG(2, "%s()\n", __func__ );
@@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
* If non-blocking mode is set, or the port is not enabled,
* then make the check up front and then exit.
*/
- if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
- /* nonblock mode is set or port is not enabled */
+ if (test_bit(TTY_IO_ERROR, &tty->flags)) {
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ /* nonblock mode is set */
+ if (tty->termios.c_cflag & CBAUD)
+ tty_port_raise_dtr_rts(port);
port->flags |= ASYNC_NORMAL_ACTIVE;
IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
return 0;
@@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
__FILE__, __LINE__, tty->driver->name, port->count);
spin_lock_irqsave(&port->lock, flags);
- if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ if (!tty_hung_up_p(filp))
port->count--;
- }
- spin_unlock_irqrestore(&port->lock, flags);
port->blocked_open++;
+ spin_unlock_irqrestore(&port->lock, flags);
while (1) {
if (tty->termios.c_cflag & CBAUD)
tty_port_raise_dtr_rts(port);
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) ||
!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
@@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
__set_current_state(TASK_RUNNING);
remove_wait_queue(&port->open_wait, &wait);
- if (extra_count) {
- /* ++ is not atomic, so this should be protected - Jean II */
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
+ if (!tty_hung_up_p(filp))
port->count++;
- spin_unlock_irqrestore(&port->lock, flags);
- }
port->blocked_open--;
+ spin_unlock_irqrestore(&port->lock, flags);
IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
__FILE__, __LINE__, tty->driver->name, port->count);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index e71e85ba2bf1..29340a9a6fb9 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
/* case CS_ISO_8859_9: */
/* case CS_UNICODE: */
default:
- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
- __func__, ias_charset_types[charset]);
+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
+ __func__, charset,
+ charset < ARRAY_SIZE(ias_charset_types) ?
+ ias_charset_types[charset] :
+ "(unknown)");
/* Aborting, close connection! */
iriap_disconnect_request(self);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 556fdafdd1ea..8555f331ea60 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
xp->priority = pol->sadb_x_policy_priority;
- sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
+ sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
if (!xp->family) {
err = -EINVAL;
@@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
if (xp->selector.sport)
xp->selector.sport_mask = htons(0xffff);
- sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
+ sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);
xp->selector.prefixlen_d = sa->sadb_address_prefixlen;
@@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
memset(&sel, 0, sizeof(sel));
- sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
+ sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
sel.prefixlen_s = sa->sadb_address_prefixlen;
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
if (sel.sport)
sel.sport_mask = htons(0xffff);
- sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
+ sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
sel.prefixlen_d = sa->sadb_address_prefixlen;
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
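
The four af_key hunks above replace a trailing comma with a semicolon. Thanks to the C comma operator the original still compiled and even behaved correctly, since `sa = x, y = z;` is just the two assignments evaluated left to right, but the construct hides the statement boundary and is fragile. A toy demonstration:

    #include <stdio.h>

    int main(void)
    {
        int a, b;

        /* Assignment binds tighter than the comma operator, so this
         * parses as "(a = 1), (b = a + 1);" -- two statements fused. */
        a = 1,
        b = a + 1;
        printf("a=%d b=%d\n", a, b);    /* prints a=1 b=2 */

        /* The danger is in other positions: here the comma operator
         * discards its left operand, so the expression yields b. */
        return (a, b) - 2;              /* exit status 0 */
    }
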
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 3f4e3afc191a..6a53371dba1f 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
l2tp_xmit_skb(session, skb, session->hdr_len);
sock_put(ps->tunnel_sock);
+ sock_put(sk);
return error;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 09d96a8f6c2c..fb306814576a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3285,6 +3285,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_chanctx_conf *chanctx_conf;
int ret = -ENODATA;
@@ -3293,6 +3294,16 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
if (chanctx_conf) {
*chandef = chanctx_conf->def;
ret = 0;
+ } else if (local->open_count > 0 &&
+ local->open_count == local->monitors &&
+ sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ if (local->use_chanctx)
+ *chandef = local->monitor_chandef;
+ else
+ cfg80211_chandef_create(chandef,
+ local->_oper_channel,
+ local->_oper_channel_type);
+ ret = 0;
}
rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 2c059e54e885..baaa8608e52d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -107,7 +107,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
lockdep_assert_held(&local->mtx);
- active = !list_empty(&local->chanctx_list);
+ active = !list_empty(&local->chanctx_list) || local->monitors;
if (!local->ops->remain_on_channel) {
list_for_each_entry(roc, &local->roc_list, list) {
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
ieee80211_adjust_monitor_flags(sdata, 1);
ieee80211_configure_filter(local);
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
netif_carrier_on(dev);
break;
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_adjust_monitor_flags(sdata, -1);
ieee80211_configure_filter(local);
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
break;
case NL80211_IFTYPE_P2P_DEVICE:
/* relies on synchronize_rcu() below */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9f6464f3e05f..141577412d84 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
mask) >> shift;
+ if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ continue;
+
switch (ap_mcs) {
default:
if (our_mcs <= ap_mcs)
@@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/*
+ * Stop timers before deleting work items, as timers
+ * could race and re-add the work-items. They will be
+ * re-established on connection.
+ */
+ del_timer_sync(&ifmgd->conn_mon_timer);
+ del_timer_sync(&ifmgd->bcn_mon_timer);
+
+ /*
* we need to use atomic bitops for the running bits
* only because both timers might fire at the same
* time -- the code here is properly synchronised.
@@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
if (del_timer_sync(&ifmgd->timer))
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
- cancel_work_sync(&ifmgd->chswitch_work);
if (del_timer_sync(&ifmgd->chswitch_timer))
set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
-
- /* these will just be re-established on connection */
- del_timer_sync(&ifmgd->conn_mon_timer);
- del_timer_sync(&ifmgd->bcn_mon_timer);
+ cancel_work_sync(&ifmgd->chswitch_work);
}
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
@@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ /*
+ * Make sure some work items will not run after this,
+ * they will not do anything but might not have been
+ * cancelled when disconnecting.
+ */
+ cancel_work_sync(&ifmgd->monitor_work);
+ cancel_work_sync(&ifmgd->beacon_connection_loss_work);
+ cancel_work_sync(&ifmgd->request_smps_work);
+ cancel_work_sync(&ifmgd->csa_connection_drop_work);
+ cancel_work_sync(&ifmgd->chswitch_work);
+
mutex_lock(&ifmgd->mtx);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, false);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index de8548bf0a7f..8914d2d2881a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
if (local->queue_stop_reasons[q] ||
(!txpending && !skb_queue_empty(&local->pending[q]))) {
if (unlikely(info->flags &
- IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
- local->queue_stop_reasons[q] &
- ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
+ IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
+ if (local->queue_stop_reasons[q] &
+ ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
+ /*
+ * Drop off-channel frames if queues
+ * are stopped for any reason other
+ * than off-channel operation. Never
+ * queue them.
+ */
+ spin_unlock_irqrestore(
+ &local->queue_stop_reason_lock,
+ flags);
+ ieee80211_purge_tx_queue(&local->hw,
+ skbs);
+ return true;
+ }
+ } else {
+
/*
- * Drop off-channel frames if queues are stopped
- * for any reason other than off-channel
- * operation. Never queue them.
+ * Since queue is stopped, queue up frames for
+ * later transmission from the tx-pending
+ * tasklet when the queue is woken again.
*/
- spin_unlock_irqrestore(
- &local->queue_stop_reason_lock, flags);
- ieee80211_purge_tx_queue(&local->hw, skbs);
- return true;
+ if (txpending)
+ skb_queue_splice_init(skbs,
+ &local->pending[q]);
+ else
+ skb_queue_splice_tail_init(skbs,
+ &local->pending[q]);
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
+ return false;
}
-
- /*
- * Since queue is stopped, queue up frames for later
- * transmission from the tx-pending tasklet when the
- * queue is woken again.
- */
- if (txpending)
- skb_queue_splice_init(skbs, &local->pending[q]);
- else
- skb_queue_splice_tail_init(skbs,
- &local->pending[q]);
-
- spin_unlock_irqrestore(&local->queue_stop_reason_lock,
- flags);
- return false;
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
}
if (!is_multicast_ether_addr(skb->data)) {
+ struct sta_info *next_hop;
+ bool mpp_lookup = true;
+
mpath = mesh_path_lookup(sdata, skb->data);
- if (!mpath)
+ if (mpath) {
+ mpp_lookup = false;
+ next_hop = rcu_dereference(mpath->next_hop);
+ if (!next_hop ||
+ !(mpath->flags & (MESH_PATH_ACTIVE |
+ MESH_PATH_RESOLVING)))
+ mpp_lookup = true;
+ }
+
+ if (mpp_lookup)
mppath = mpp_path_lookup(sdata, skb->data);
+
+ if (mppath && mpath)
+ mesh_path_del(mpath->sdata, mpath->dst);
}
/*
@@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
if (local->tim_in_locked_section) {
__ieee80211_beacon_add_tim(sdata, ps, skb);
} else {
- spin_lock(&local->tim_lock);
+ spin_lock_bh(&local->tim_lock);
__ieee80211_beacon_add_tim(sdata, ps, skb);
- spin_unlock(&local->tim_lock);
+ spin_unlock_bh(&local->tim_lock);
}
return 0;
@@ -2724,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
- sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
if (!ieee80211_tx_prepare(sdata, &tx, skb))
break;
dev_kfree_skb_any(skb);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f82b2e606cfd..1ba9dbc0e107 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
if (ret == -EAGAIN)
ret = 1;
- return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+ return (ret < 0 && ret != -ENOTEMPTY) ? ret :
+ ret > 0 ? 0 : -IPSET_ERR_EXIST;
}
/* Get headed data of a set */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index a9740bd6fe54..94b4b9853f60 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
{
const struct nf_conn_help *help;
const struct nf_conntrack_helper *helper;
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
/* Called from the helper function, this call never fails */
help = nfct_help(ct);
@@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
helper = rcu_dereference(help->helper);
nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
- "nf_ct_%s: dropping packet: %s ", helper->name, fmt);
+ "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);
+
+ va_end(args);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_log);
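
Before this change, nf_ct_helper_log() passed the caller's format string into nf_log_packet() as plain string data, so the caller's variadic arguments were never expanded. The fix packs them into a struct va_format and lets the kernel's %pV printk extension expand them in place. Userspace printf() has no %pV; the sketch below shows the same forwarding idea with vsnprintf(), with illustrative names only:

    #include <stdarg.h>
    #include <stdio.h>

    /* Analogue of nf_ct_helper_log(): wrap a lower-level logger while
     * forwarding the caller's format string and arguments intact. */
    static void helper_log(const char *helper, const char *fmt, ...)
    {
        char msg[128];
        va_list args;

        va_start(args, fmt);
        vsnprintf(msg, sizeof(msg), fmt, args);  /* expand caller's args */
        va_end(args);

        /* The old code did the equivalent of printing fmt itself as a
         * "%s" argument, so "%d"/"%s" inside it were never expanded. */
        printf("nf_ct_%s: dropping packet: %s\n", helper, msg);
    }

    int main(void)
    {
        helper_log("ftp", "invalid command %d from %s", 42, "peer");
        return 0;
    }
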
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index d578ec251712..0b1b32cda307 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id)
}
EXPORT_SYMBOL_GPL(nfnl_unlock);
-static struct mutex *nfnl_get_lock(__u8 subsys_id)
-{
- return &table[subsys_id].mutex;
-}
-
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
nfnl_lock(n->subsys_id);
@@ -199,7 +194,7 @@ replay:
rcu_read_unlock();
nfnl_lock(subsys_id);
if (rcu_dereference_protected(table[subsys_id].subsys,
- lockdep_is_held(nfnl_get_lock(subsys_id))) != ss ||
+ lockdep_is_held(&table[subsys_id].mutex)) != ss ||
nfnetlink_find_client(type, ss) != nc)
err = -EAGAIN;
else if (nc->call)
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index ba92824086f3..3228d7f24eb4 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
const struct xt_audit_info *info = par->targinfo;
struct audit_buffer *ab;
+ if (audit_enabled == 0)
+ goto errout;
+
ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
if (ab == NULL)
goto errout;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 847d495cd4de..8a6c6ea466d8 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
struct netlbl_unlhsh_walk_arg cb_arg;
u32 skip_bkt = cb->args[0];
u32 skip_chain = cb->args[1];
- u32 skip_addr4 = cb->args[2];
- u32 skip_addr6 = cb->args[3];
u32 iter_bkt;
u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
struct netlbl_unlhsh_iface *iface;
@@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
continue;
netlbl_af4list_foreach_rcu(addr4,
&iface->addr4_list) {
- if (iter_addr4++ < skip_addr4)
+ if (iter_addr4++ < cb->args[2])
continue;
if (netlbl_unlabel_staticlist_gen(
NLBL_UNLABEL_C_STATICLIST,
@@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(addr6,
&iface->addr6_list) {
- if (iter_addr6++ < skip_addr6)
+ if (iter_addr6++ < cb->args[3])
continue;
if (netlbl_unlabel_staticlist_gen(
NLBL_UNLABEL_C_STATICLIST,
@@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
unlabel_staticlist_return:
rcu_read_unlock();
- cb->args[0] = skip_bkt;
- cb->args[1] = skip_chain;
- cb->args[2] = skip_addr4;
- cb->args[3] = skip_addr6;
+ cb->args[0] = iter_bkt;
+ cb->args[1] = iter_chain;
+ cb->args[2] = iter_addr4;
+ cb->args[3] = iter_addr6;
return skb->len;
}
@@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
{
struct netlbl_unlhsh_walk_arg cb_arg;
struct netlbl_unlhsh_iface *iface;
- u32 skip_addr4 = cb->args[0];
- u32 skip_addr6 = cb->args[1];
- u32 iter_addr4 = 0;
+ u32 iter_addr4 = 0, iter_addr6 = 0;
struct netlbl_af4list *addr4;
#if IS_ENABLED(CONFIG_IPV6)
- u32 iter_addr6 = 0;
struct netlbl_af6list *addr6;
#endif
@@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
goto unlabel_staticlistdef_return;
netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
- if (iter_addr4++ < skip_addr4)
+ if (iter_addr4++ < cb->args[0])
continue;
if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
iface,
@@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
- if (iter_addr6++ < skip_addr6)
+ if (iter_addr6++ < cb->args[1])
continue;
if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
iface,
@@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
unlabel_staticlistdef_return:
rcu_read_unlock();
- cb->args[0] = skip_addr4;
- cb->args[1] = skip_addr6;
+ cb->args[0] = iter_addr4;
+ cb->args[1] = iter_addr6;
return skb->len;
}
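
Both netlabel dump callbacks above had the same bug: on exit they wrote the old skip values back into cb->args[], so a dump spanning multiple netlink messages replayed the same entries instead of resuming. The pattern, reduced to a runnable userspace analogue where args[0] plays the role of cb->args[]:

    #include <stdio.h>

    /* Emit at most `budget` items per call and record the resume
     * position in args[0], as the fixed callbacks now do. */
    static int dump(const int *items, int n, unsigned long *args, int budget)
    {
        unsigned long iter;
        int emitted = 0;

        for (iter = args[0]; iter < (unsigned long)n; iter++) {
            if (emitted == budget)
                break;
            printf("item %d\n", items[iter]);
            emitted++;
        }
        /* Persist the position actually reached (the fix), rather than
         * writing back the value we started from (the bug). */
        args[0] = iter;
        return emitted;
    }

    int main(void)
    {
        int items[] = { 10, 20, 30, 40, 50 };
        unsigned long args[1] = { 0 };

        while (dump(items, 5, args, 2) > 0)
            ;   /* each call resumes where the previous one stopped */
        return 0;
    }
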
diff --git a/net/rds/message.c b/net/rds/message.c
index f0a4658f3273..aba232f9f308 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
void rds_message_put(struct rds_message *rm)
{
rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
- if (atomic_read(&rm->m_refcount) == 0) {
-printk(KERN_CRIT "danger refcount zero on %p\n", rm);
-WARN_ON(1);
- }
+ WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
if (atomic_dec_and_test(&rm->m_refcount)) {
BUG_ON(!list_empty(&rm->m_sock_item));
BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
struct rds_message *rm;
+ if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
+ return NULL;
+
rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
if (!rm)
goto out;
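
rds_message_alloc() now rejects extra_len values for which sizeof(struct rds_message) + extra_len would wrap around or exceed what kmalloc can serve. The guard subtracts on the known-constant side precisely so the check itself cannot overflow. A portable sketch; ALLOC_MAX and struct message are stand-ins, not RDS definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ALLOC_MAX ((size_t)1 << 22)     /* stand-in for KMALLOC_MAX_SIZE */

    struct message {
        size_t len;                         /* stand-in header field */
    };

    static void *message_alloc(size_t extra_len)
    {
        /* "extra_len > MAX - sizeof(header)" cannot overflow, whereas
         * "sizeof(header) + extra_len > MAX" can wrap around and pass. */
        if (extra_len > ALLOC_MAX - sizeof(struct message))
            return NULL;
        return calloc(1, sizeof(struct message) + extra_len);
    }

    int main(void)
    {
        void *ok  = message_alloc(16);          /* succeeds */
        void *bad = message_alloc(SIZE_MAX);    /* rejected, no wraparound */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
    }
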
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 7be790d60b90..73be187d389e 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter,
for (i = 0; i < nr; i++) {
BUG_ON(strlen(names[i]) >= sizeof(ctr.name));
strncpy(ctr.name, names[i], sizeof(ctr.name) - 1);
+ ctr.name[sizeof(ctr.name) - 1] = '\0';
ctr.value = values[i];
rds_info_copy(iter, &ctr, sizeof(ctr));
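
The one-line stats fix guards against strncpy()'s classic trap: when the source string is at least as long as the count, strncpy() writes no NUL terminator at all. A runnable demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char name[8];

        memset(name, 'X', sizeof(name));        /* simulate stack noise */
        strncpy(name, "connection_resets", sizeof(name) - 1);

        /* The source filled all 7 copied bytes, so nothing terminated
         * the buffer; the last byte still holds whatever was there. */
        printf("last byte after strncpy: '%c'\n", name[sizeof(name) - 1]);

        name[sizeof(name) - 1] = '\0';          /* the fix */
        printf("safe to print now: %s\n", name);  /* "connect" */
        return 0;
    }
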
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e9a77f621c3d..d51852bba01c 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
new_num_classes == q->max_agg_classes - 1) /* agg no more full */
hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
+ /* The next assignment may let
+ * agg->initial_budget > agg->budgetmax
+ * hold; we take that into account in charge_actual_service().
+ */
agg->budgetmax = new_num_classes * agg->lmax;
new_agg_weight = agg->class_weight * new_num_classes;
agg->inv_w = ONE_FP/new_agg_weight;
@@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q)
unsigned long old_vslot = q->oldV >> q->min_slot_shift;
if (vslot != old_vslot) {
- unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
+ unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
qfq_move_groups(q, mask, IR, ER);
qfq_move_groups(q, mask, IB, EB);
}
@@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
- /* compute the service received by the aggregate */
- u32 service_received = agg->initial_budget - agg->budget;
+ /* Compute the service received by the aggregate, taking into
+ * account that, after decreasing the number of classes in
+ * agg, it may happen that
+ * agg->initial_budget - agg->budget > agg->budgetmax
+ */
+ u32 service_received = min(agg->budgetmax,
+ agg->initial_budget - agg->budget);
agg->F = agg->S + (u64)service_received * agg->inv_w;
}
+static inline void qfq_update_agg_ts(struct qfq_sched *q,
+ struct qfq_aggregate *agg,
+ enum update_reason reason);
+
+static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
+
static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
in_serv_agg->initial_budget = in_serv_agg->budget =
in_serv_agg->budgetmax;
- if (!list_empty(&in_serv_agg->active))
+ if (!list_empty(&in_serv_agg->active)) {
/*
* Still active: reschedule for
* service. Possible optimization: if no other
@@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
* handle it, we would need to maintain an
* extra num_active_aggs field.
*/
- qfq_activate_agg(q, in_serv_agg, requeue);
- else if (sch->q.qlen == 0) { /* no aggregate to serve */
+ qfq_update_agg_ts(q, in_serv_agg, requeue);
+ qfq_schedule_agg(q, in_serv_agg);
+ } else if (sch->q.qlen == 0) { /* no aggregate to serve */
q->in_serv_agg = NULL;
return NULL;
}
@@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
qdisc_bstats_update(sch, skb);
agg_dequeue(in_serv_agg, cl, len);
- in_serv_agg->budget -= len;
+ /* If lmax is lowered, through qfq_change_class, for a class
+ * owning pending packets with larger size than the new value
+ * of lmax, then the following condition may hold.
+ */
+ if (unlikely(in_serv_agg->budget < len))
+ in_serv_agg->budget = 0;
+ else
+ in_serv_agg->budget -= len;
+
q->V += (u64)len * IWSUM;
pr_debug("qfq dequeue: len %u F %lld now %lld\n",
len, (unsigned long long) in_serv_agg->F,
@@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->deficit = agg->lmax;
list_add_tail(&cl->alist, &agg->active);
- if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
- return err; /* aggregate was not empty, nothing else to do */
+ if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
+ q->in_serv_agg == agg)
+ return err; /* non-empty or in service, nothing else to do */
- /* recharge budget */
- agg->initial_budget = agg->budget = agg->budgetmax;
-
- qfq_update_agg_ts(q, agg, enqueue);
- if (q->in_serv_agg == NULL)
- q->in_serv_agg = agg;
- else if (agg != q->in_serv_agg)
- qfq_schedule_agg(q, agg);
+ qfq_activate_agg(q, agg, enqueue);
return err;
}
@@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
/* group was surely ineligible, remove */
__clear_bit(grp->index, &q->bitmaps[IR]);
__clear_bit(grp->index, &q->bitmaps[IB]);
- } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
+ } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
+ q->in_serv_agg == NULL)
q->V = roundedS;
grp->S = roundedS;
@@ -1284,8 +1303,15 @@ skip_update:
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
enum update_reason reason)
{
+ agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
+
qfq_update_agg_ts(q, agg, reason);
- qfq_schedule_agg(q, agg);
+ if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
+ q->in_serv_agg = agg; /* start serving this aggregate */
+ /* update V: to be in service, agg must be eligible */
+ q->oldV = q->V = agg->S;
+ } else if (agg != q->in_serv_agg)
+ qfq_schedule_agg(q, agg);
}
static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
@@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
__set_bit(grp->index, &q->bitmaps[s]);
}
}
-
- qfq_update_eligible(q);
}
static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2b3ef03c6098..12ed45dbe75d 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* SCTP-AUTH extensions*/
INIT_LIST_HEAD(&ep->endpoint_shared_keys);
- null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
+ null_key = sctp_auth_shkey_create(0, gfp);
if (!null_key)
goto nomem;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c99458df3f3f..b9070736b8d9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
if (len < sizeof(sctp_assoc_t))
return -EINVAL;
+ /* Allow the struct to grow and fill in as much as possible */
+ len = min_t(size_t, len, sizeof(sas));
+
if (copy_from_user(&sas, optval, len))
return -EFAULT;
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
/* Mark beginning of a new observation period */
asoc->stats.max_obs_rto = asoc->rto_min;
- /* Allow the struct to grow and fill in as much as possible */
- len = min_t(size_t, len, sizeof(sas));
-
if (put_user(len, optlen))
return -EFAULT;
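
Moving the min_t() clamp above copy_from_user() is the real fix in this hunk: len is caller-controlled, and copying len bytes into the fixed-size stack variable sas before clamping allows a kernel stack overflow. The ordering, sketched as a userspace analogue with memcpy() standing in for copy_from_user() and made-up struct fields:

    #include <stdio.h>
    #include <string.h>

    struct assoc_stats { unsigned long rto, sacks; };   /* stand-in for sas */

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Analogue of the getsockopt handler: optval and len come from the
     * caller and must not be trusted to fit the kernel-side struct. */
    static int get_stats(const void *optval, size_t len)
    {
        struct assoc_stats sas;

        /* Clamp FIRST, then copy -- copying the unclamped len would
         * write past sas on the stack, which the patch prevents. */
        len = MIN(len, sizeof(sas));
        memcpy(&sas, optval, len);
        printf("copied %zu bytes\n", len);
        return 0;
    }

    int main(void)
    {
        unsigned char big[256] = { 0 };
        return get_stats(big, sizeof(big));   /* oversized request, clamped */
    }
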
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 442ad4ed6315..825ea94415b3 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -41,8 +41,6 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-#define MAX_KMALLOC_SIZE 131072
-
static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
__u16 out);
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
int size;
size = sctp_ssnmap_size(in, out);
- if (size <= MAX_KMALLOC_SIZE)
+ if (size <= KMALLOC_MAX_SIZE)
retval = kmalloc(size, gfp);
else
retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
return retval;
fail_map:
- if (size <= MAX_KMALLOC_SIZE)
+ if (size <= KMALLOC_MAX_SIZE)
kfree(retval);
else
free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
int size;
size = sctp_ssnmap_size(map->in.len, map->out.len);
- if (size <= MAX_KMALLOC_SIZE)
+ if (size <= KMALLOC_MAX_SIZE)
kfree(map);
else
free_pages((unsigned long)map, get_order(size));
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 5f25e0c92c31..396c45174e5b 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -51,7 +51,7 @@
static void sctp_tsnmap_update(struct sctp_tsnmap *map);
static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
__u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
/* Initialize a block of memory as a tsnmap. */
struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
gap = tsn - map->base_tsn;
- if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+ if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
return -ENOMEM;
if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
return ngaps;
}
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
{
unsigned long *new;
unsigned long inc;
u16 len;
- if (gap >= SCTP_TSN_MAP_SIZE)
+ if (size > SCTP_TSN_MAP_SIZE)
return 0;
- inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+ inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
new = kzalloc(len>>3, GFP_ATOMIC);
if (!new)
return 0;
- bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+ bitmap_copy(new, map->tsn_map,
+ map->max_tsn_seen - map->cumulative_tsn_ack_point);
kfree(map->tsn_map);
map->tsn_map = new;
map->len = len;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65b..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
{
struct sk_buff_head temp;
struct sctp_ulpevent *event;
+ int event_eor = 0;
/* Create an event from the incoming chunk. */
event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* very first SKB on the 'temp' list.
*/
- if (event)
+ if (event) {
+ event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
sctp_ulpq_tail_event(ulpq, event);
+ }
- return 0;
+ return event_eor;
}
/* Add a new event for propagation to the ULP. */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
ctsn = cevent->tsn;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!first_frag)
+ return NULL;
+ goto done;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag) {
first_frag = pos;
next_tsn = ctsn + 1;
last_frag = pos;
- } else if (next_tsn == ctsn)
+ } else if (next_tsn == ctsn) {
next_tsn++;
- else
+ last_frag = pos;
+ } else
goto done;
break;
case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
} else
goto done;
break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag)
+ return NULL;
+ else
+ goto done;
+ break;
+
default:
return NULL;
}
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
struct sk_buff_head *list, __u16 needed)
{
__u16 freed = 0;
- __u32 tsn;
- struct sk_buff *skb;
+ __u32 tsn, last_tsn;
+ struct sk_buff *skb, *flist, *last;
struct sctp_ulpevent *event;
struct sctp_tsnmap *tsnmap;
tsnmap = &ulpq->asoc->peer.tsn_map;
- while ((skb = __skb_dequeue_tail(list)) != NULL) {
- freed += skb_headlen(skb);
+ while ((skb = skb_peek_tail(list)) != NULL) {
event = sctp_skb2event(skb);
tsn = event->tsn;
+ /* Don't renege below the Cumulative TSN ACK Point. */
+ if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+ break;
+
+ /* Events in ordering queue may have multiple fragments
+ * corresponding to additional TSNs. Sum the total
+ * freed space; find the last TSN.
+ */
+ freed += skb_headlen(skb);
+ flist = skb_shinfo(skb)->frag_list;
+ for (last = flist; flist; flist = flist->next) {
+ last = flist;
+ freed += skb_headlen(last);
+ }
+ if (last)
+ last_tsn = sctp_skb2event(last)->tsn;
+ else
+ last_tsn = tsn;
+
+ /* Unlink the event, then renege all applicable TSNs. */
+ __skb_unlink(skb, list);
sctp_ulpevent_free(event);
- sctp_tsnmap_renege(tsnmap, tsn);
+ while (TSN_lte(tsn, last_tsn)) {
+ sctp_tsnmap_renege(tsnmap, tsn);
+ tsn++;
+ }
if (freed >= needed)
return freed;
}
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event;
struct sctp_association *asoc;
struct sctp_sock *sp;
+ __u32 ctsn;
+ struct sk_buff *skb;
asoc = ulpq->asoc;
sp = sctp_sk(asoc->base.sk);
/* If the association is already in Partial Delivery mode
- * we have noting to do.
+ * we have nothing to do.
*/
if (ulpq->pd_mode)
return;
+ /* Data must be at or below the Cumulative TSN ACK Point to
+ * start partial delivery.
+ */
+ skb = skb_peek(&asoc->ulpq.reasm);
+ if (skb != NULL) {
+ ctsn = sctp_skb2event(skb)->tsn;
+ if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+ return;
+ }
+
/* If the user enabled fragment interleave socket option,
* multiple associations can enter partial delivery.
* Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
}
/* If able to free enough room, accept this chunk. */
if (chunk && (freed >= needed)) {
- __u32 tsn;
- tsn = ntohl(chunk->subh.data_hdr->tsn);
- sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
- sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
- sctp_ulpq_partial_delivery(ulpq, gfp);
+ int retval;
+ retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+ /*
+ * Enter partial delivery if chunk has not been
+ * delivered; otherwise, drain the reassembly queue.
+ */
+ if (retval <= 0)
+ sctp_ulpq_partial_delivery(ulpq, gfp);
+ else if (retval == 1)
+ sctp_ulpq_reasm_drain(ulpq);
}
sk_mem_reclaim(asoc->base.sk);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index f7d34e7b6f81..5ead60550895 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd,
else {
int N, i;
+ /*
+ * NOTE: we skip uid_valid()/gid_valid() checks here:
+ * instead, -1 ids are later mapped to the
+ * (export-specific) anonymous id by nfsd_setuser.
+ *
+ * (But supplementary gid's get no such special
+ * treatment so are checked for validity here.)
+ */
/* uid */
rsci.cred.cr_uid = make_kuid(&init_user_ns, id);
- if (!uid_valid(rsci.cred.cr_uid))
- goto out;
/* gid */
if (get_int(&mesg, &id))
goto out;
rsci.cred.cr_gid = make_kgid(&init_user_ns, id);
- if (!gid_valid(rsci.cred.cr_gid))
- goto out;
/* number of additional gid's */
if (get_int(&mesg, &N))
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7b9b40224a27..a9129f8d7070 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1174,6 +1174,8 @@ static struct file_system_type rpc_pipe_fs_type = {
.mount = rpc_mount,
.kill_sb = rpc_kill_sb,
};
+MODULE_ALIAS_FS("rpc_pipefs");
+MODULE_ALIAS("rpc_pipefs");
static void
init_once(void *foo)
@@ -1218,6 +1220,3 @@ void unregister_rpc_pipefs(void)
kmem_cache_destroy(rpc_inode_cachep);
unregister_filesystem(&rpc_pipe_fs_type);
}
-
-/* Make 'mount -t rpc_pipefs ...' autoload this module. */
-MODULE_ALIAS("rpc_pipefs");
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c1d8476b7692..3d02130828da 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt)
xs_tcp_shutdown(xprt);
}
+static void xs_local_destroy(struct rpc_xprt *xprt)
+{
+ xs_close(xprt);
+ xs_free_peer_addresses(xprt);
+ xprt_free(xprt);
+ module_put(THIS_MODULE);
+}
+
/**
* xs_destroy - prepare to shutdown a transport
* @xprt: doomed transport
@@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
cancel_delayed_work_sync(&transport->connect_worker);
- xs_close(xprt);
- xs_free_peer_addresses(xprt);
- xprt_free(xprt);
- module_put(THIS_MODULE);
+ xs_local_destroy(xprt);
}
static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
@@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = {
.send_request = xs_local_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
.close = xs_close,
- .destroy = xs_destroy,
+ .destroy = xs_local_destroy,
.print_stats = xs_local_print_stats,
};
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 5ffff039b017..ea4155fe9733 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
rdev->wiphy.rts_threshold = (u32) -1;
rdev->wiphy.coverage_class = 0;
- rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH |
- NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
+ rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
return &rdev->wiphy;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 35545ccc30fd..d44ab216c0ec 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -554,27 +554,8 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
goto nla_put_failure;
- if (chan->flags & IEEE80211_CHAN_RADAR) {
- u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
- if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
- goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
- chan->dfs_state))
- goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
- goto nla_put_failure;
- }
- if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
- goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
- goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
- goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
+ if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
goto nla_put_failure;
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -900,9 +881,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
c->max_interfaces))
goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
- c->radar_detect_widths))
- goto nla_put_failure;
nla_nest_end(msg, nl_combi);
}
@@ -914,48 +892,6 @@ nla_put_failure:
return -ENOBUFS;
}
-#ifdef CONFIG_PM
-static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
- struct sk_buff *msg)
-{
- const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
- struct nlattr *nl_tcp;
-
- if (!tcp)
- return 0;
-
- nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
- if (!nl_tcp)
- return -ENOBUFS;
-
- if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
- tcp->data_payload_max))
- return -ENOBUFS;
-
- if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
- tcp->data_payload_max))
- return -ENOBUFS;
-
- if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
- return -ENOBUFS;
-
- if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
- sizeof(*tcp->tok), tcp->tok))
- return -ENOBUFS;
-
- if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
- tcp->data_interval_max))
- return -ENOBUFS;
-
- if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
- tcp->wake_payload_max))
- return -ENOBUFS;
-
- nla_nest_end(msg, nl_tcp);
- return 0;
-}
-#endif
-
static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
struct cfg80211_registered_device *dev)
{
@@ -1330,9 +1266,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
goto nla_put_failure;
}
- if (nl80211_send_wowlan_tcp_caps(dev, msg))
- goto nla_put_failure;
-
nla_nest_end(msg, nl_wowlan);
}
#endif
@@ -1365,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
dev->wiphy.max_acl_mac_addrs))
goto nla_put_failure;
- if (dev->wiphy.extended_capabilities &&
- (nla_put(msg, NL80211_ATTR_EXT_CAPA,
- dev->wiphy.extended_capabilities_len,
- dev->wiphy.extended_capabilities) ||
- nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
- dev->wiphy.extended_capabilities_len,
- dev->wiphy.extended_capabilities_mask)))
- goto nla_put_failure;
-
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -1383,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
{
- int idx = 0;
+ int idx = 0, ret;
int start = cb->args[0];
struct cfg80211_registered_device *dev;
@@ -1393,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (++idx <= start)
continue;
- if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- dev) < 0) {
+ ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ dev);
+ if (ret < 0) {
+ /*
+ * If sending the wiphy data didn't fit (ENOBUFS or
+ * EMSGSIZE returned), this SKB is still empty (so
+ * it's not too big because another wiphy dataset is
+ * already in the skb) and we've not tried to adjust
+ * the dump allocation yet ... then adjust the alloc
+ * size to be bigger, and return 1 but with the empty
+ * skb. This results in an empty message being RX'ed
+ * in userspace, but that is ignored.
+ *
+ * We can then retry with the larger buffer.
+ */
+ if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
+ !skb->len &&
+ cb->min_dump_alloc < 4096) {
+ cb->min_dump_alloc = 4096;
+ mutex_unlock(&cfg80211_mutex);
+ return 1;
+ }
idx--;
break;
}
@@ -1412,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
struct cfg80211_registered_device *dev = info->user_ptr[0];
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(4096, GFP_KERNEL);
if (!msg)
return -ENOMEM;
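
The dump-side changes at the end of this diff implement a grow-and-retry strategy: if serializing one wiphy returns ENOBUFS or EMSGSIZE into an otherwise empty skb, the callback bumps min_dump_alloc to 4096 and lets netlink rerun the dump with the larger buffer. The control flow, reduced to a hedged userspace analogue (fill() stands in for nl80211_send_wiphy(), returning -1 for "did not fit"):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int fill(char *buf, size_t cap, const char *record)
    {
        if (strlen(record) + 1 > cap)
            return -1;                  /* like -EMSGSIZE */
        strcpy(buf, record);
        return 0;
    }

    int main(void)
    {
        char record[600];
        size_t cap = 256;               /* like NLMSG_DEFAULT_SIZE */
        char *buf = malloc(cap);

        if (!buf)
            return 1;
        memset(record, 'w', sizeof(record) - 1);
        record[sizeof(record) - 1] = '\0';

        if (fill(buf, cap, record) < 0) {
            /* Buffer is still empty and we have not grown it yet, so
             * enlarge once (the patch picks 4096) and retry the dump. */
            char *bigger = realloc(buf, 4096);
            if (!bigger) {
                free(buf);
                return 1;
            }
            buf = bigger;
            cap = 4096;
            if (fill(buf, cap, record) == 0)
                printf("fit after retry with %zu-byte buffer\n", cap);
        }
        free(buf);
        return 0;
    }
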