Diffstat (limited to 'net'): 30 files changed, 236 insertions, 135 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d5f1d3fd4b28..314c73ed418f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - mdst = br_mdb_get(br, skb); + mdst = br_mdb_get(br, skb, vid); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) br_multicast_deliver(mdst, skb); else diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 480330151898..828e2bcc1f52 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb) if (is_broadcast_ether_addr(dest)) skb2 = skb; else if (is_multicast_ether_addr(dest)) { - mdst = br_mdb_get(br, skb); + mdst = br_mdb_get(br, skb, vid); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { if ((mdst && mdst->mglist) || br_multicast_is_router(br)) diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 9f97b850fc65..ee79f3f20383 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, port = p->port; if (port) { struct br_mdb_entry e; + memset(&e, 0, sizeof(e)); e.ifindex = port->dev->ifindex; e.state = p->state; if (p->addr.proto == htons(ETH_P_IP)) @@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) break; bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); bpm->ifindex = dev->ifindex; if (br_mdb_fill_info(skb, cb, dev) < 0) goto out; @@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, return -EMSGSIZE; bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); bpm->family = AF_BRIDGE; bpm->ifindex = dev->ifindex; nest = nla_nest_start(skb, MDBA_MDB); @@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, { struct br_mdb_entry entry; + memset(&entry, 0, sizeof(entry)); entry.ifindex = port->dev->ifindex; entry.addr.proto = group->proto; entry.addr.u.ip4 = group->u.ip4; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 10e6fce1bb62..923fbeaf7afd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get( #endif struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb) + struct sk_buff *skb, u16 vid) { struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); struct br_ip ip; @@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, return NULL; ip.proto = skb->protocol; + ip.vid = vid; switch (skb->protocol) { case htons(ETH_P_IP): diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6d314c4e6bcb..3cbf5beb3d4b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -442,7 +442,7 @@ extern int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb); extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb); + struct sk_buff *skb, u16 vid); extern void br_multicast_add_port(struct net_bridge_port *port); extern void br_multicast_del_port(struct net_bridge_port *port); extern void br_multicast_enable_port(struct net_bridge_port *port); @@ -504,7 +504,7 @@ static inline int br_multicast_rcv(struct net_bridge *br, } static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb) + struct sk_buff *skb, u16 vid) { return NULL; } diff --git a/net/ceph/osdmap.c 
b/net/ceph/osdmap.c index 69bc4bf89e3e..4543b9aba40c 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) return 0; } +static int __decode_pgid(void **p, void *end, struct ceph_pg *pg) +{ + u8 v; + + ceph_decode_need(p, end, 1+8+4+4, bad); + v = ceph_decode_8(p); + if (v != 1) + goto bad; + pg->pool = ceph_decode_64(p); + pg->seed = ceph_decode_32(p); + *p += 4; /* skip preferred */ + return 0; + +bad: + dout("error decoding pgid\n"); + return -EINVAL; +} + /* * decode a full map. */ @@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) for (i = 0; i < len; i++) { int n, j; struct ceph_pg pgid; - struct ceph_pg_v1 pgid_v1; struct ceph_pg_mapping *pg; - ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); - ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); - pgid.pool = le32_to_cpu(pgid_v1.pool); - pgid.seed = le16_to_cpu(pgid_v1.ps); + err = __decode_pgid(p, end, &pgid); + if (err) + goto bad; + ceph_decode_need(p, end, sizeof(u32), bad); n = ceph_decode_32(p); err = -EINVAL; if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) @@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, u16 version; ceph_decode_16_safe(p, end, version, bad); - if (version > 6) { - pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6); + if (version != 6) { + pr_warning("got unknown v %d != 6 of inc osdmap\n", version); goto bad; } @@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, while (len--) { struct ceph_pg_mapping *pg; int j; - struct ceph_pg_v1 pgid_v1; struct ceph_pg pgid; u32 pglen; - ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); - ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); - pgid.pool = le32_to_cpu(pgid_v1.pool); - pgid.seed = le16_to_cpu(pgid_v1.ps); - pglen = ceph_decode_32(p); + err = __decode_pgid(p, end, &pgid); + if (err) + goto bad; + ceph_decode_need(p, end, sizeof(u32), bad); + pglen = ceph_decode_32(p); if (pglen) { ceph_decode_need(p, end, pglen*sizeof(u32), bad); diff --git a/net/core/dev.c b/net/core/dev.c index a06a7a58dd11..dffbef70cd31 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3444,6 +3444,7 @@ ncls: } switch (rx_handler(&skb)) { case RX_HANDLER_CONSUMED: + ret = NET_RX_SUCCESS; goto unlock; case RX_HANDLER_ANOTHER: goto another_round; @@ -4103,7 +4104,7 @@ static void net_rx_action(struct softirq_action *h) * Allow this to run for 2 jiffies since which will allow * an average latency of 1.5/HZ. */ - if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) + if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) goto softnet_break; local_irq_enable(); @@ -4780,7 +4781,7 @@ EXPORT_SYMBOL(dev_set_mac_address); /** * dev_change_carrier - Change device carrier * @dev: device - * @new_carries: new value + * @new_carrier: new value * * Change device carrier */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b376410ff259..a585d45cc9d9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -979,6 +979,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, * report anything. 
*/ ivi.spoofchk = -1; + memset(ivi.mac, 0, sizeof(ivi.mac)); if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; vf_mac.vf = diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 1b588e23cf80..21291f1abcd6 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; + memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); @@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getets) { struct ieee_ets ets; + memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) @@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; + memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, @@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getpfc) { struct ieee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) @@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; + memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) @@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) @@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; + memset(&pg, 0, sizeof(pg)); err = ops->cee_peer_getpg(netdev, &pg); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) @@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->cee_peer_getpfc) { struct cee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->cee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 8c2251fb0a3f..bba5f8336317 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -84,7 +84,7 @@ (memcmp(addr1, addr2, length >> 3) == 0) /* local link, i.e. FE80::/10 */ -#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) +#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80)) /* * check whether we can compress the IID to 16 bits, diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d1874be1df3..786d97aee751 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock); * tcp/dccp_create_openreq_child(). 
*/ void inet_csk_prepare_forced_close(struct sock *sk) + __releases(&sk->sk_lock.slock) { /* sk_clone_lock locked the socket and set refcnt to 2 */ bh_unlock_sock(sk); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f6289bf6f332..310a3647c83d 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -423,7 +423,7 @@ int ip_options_compile(struct net *net, put_unaligned_be32(midtime, timeptr); opt->is_changed = 1; } - } else { + } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) { unsigned int overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index b1876e52091e..e33fe0ab2568 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -281,7 +281,8 @@ int ip6_mc_input(struct sk_buff *skb) * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && - !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && + !(ipv6_addr_type(&hdr->daddr) & + (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 9a5fd3c3e530..362ba47968e4 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, struct tty_port *port = &self->port; DECLARE_WAITQUEUE(wait, current); int retval; - int do_clocal = 0, extra_count = 0; + int do_clocal = 0; unsigned long flags; IRDA_DEBUG(2, "%s()\n", __func__ ); @@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. 
*/ - if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ - /* nonblock mode is set or port is not enabled */ + if (test_bit(TTY_IO_ERROR, &tty->flags)) { + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (filp->f_flags & O_NONBLOCK) { + /* nonblock mode is set */ + if (tty->termios.c_cflag & CBAUD) + tty_port_raise_dtr_rts(port); port->flags |= ASYNC_NORMAL_ACTIVE; IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); return 0; @@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, __FILE__, __LINE__, tty->driver->name, port->count); spin_lock_irqsave(&port->lock, flags); - if (!tty_hung_up_p(filp)) { - extra_count = 1; + if (!tty_hung_up_p(filp)) port->count--; - } - spin_unlock_irqrestore(&port->lock, flags); port->blocked_open++; + spin_unlock_irqrestore(&port->lock, flags); while (1) { if (tty->termios.c_cflag & CBAUD) tty_port_raise_dtr_rts(port); - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !test_bit(ASYNCB_INITIALIZED, &port->flags)) { @@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, __set_current_state(TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); - if (extra_count) { - /* ++ is not atomic, so this should be protected - Jean II */ - spin_lock_irqsave(&port->lock, flags); + spin_lock_irqsave(&port->lock, flags); + if (!tty_hung_up_p(filp)) port->count++; - spin_unlock_irqrestore(&port->lock, flags); - } port->blocked_open--; + spin_unlock_irqrestore(&port->lock, flags); IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", __FILE__, __LINE__, tty->driver->name, port->count); diff --git a/net/key/af_key.c b/net/key/af_key.c index 556fdafdd1ea..8555f331ea60 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); xp->priority = pol->sadb_x_policy_priority; - sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); if (!xp->family) { err = -EINVAL; @@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ if (xp->selector.sport) xp->selector.sport_mask = htons(0xffff); - sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); xp->selector.prefixlen_d = sa->sadb_address_prefixlen; @@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa memset(&sel, 0, sizeof(sel)); - sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); sel.prefixlen_s = sa->sadb_address_prefixlen; sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); @@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa if (sel.sport) sel.sport_mask = htons(0xffff); - sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); sel.prefixlen_d = sa->sadb_address_prefixlen; sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 808f5fcd1ced..fb306814576a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3290,14 +3290,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, int ret 
= -ENODATA; rcu_read_lock(); - if (local->use_chanctx) { - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (chanctx_conf) { - *chandef = chanctx_conf->def; - ret = 0; - } - } else if (local->open_count == local->monitors) { - *chandef = local->monitor_chandef; + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (chanctx_conf) { + *chandef = chanctx_conf->def; + ret = 0; + } else if (local->open_count > 0 && + local->open_count == local->monitors && + sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (local->use_chanctx) + *chandef = local->monitor_chandef; + else + cfg80211_chandef_create(chandef, + local->_oper_channel, + local->_oper_channel_type); ret = 0; } rcu_read_unlock(); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 640afab304d7..baaa8608e52d 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); netif_carrier_on(dev); break; @@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, ieee80211_adjust_monitor_flags(sdata, -1); ieee80211_configure_filter(local); + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); break; case NL80211_IFTYPE_P2P_DEVICE: /* relies on synchronize_rcu() below */ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9f6464f3e05f..141577412d84 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) & mask) >> shift; + if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED) + continue; + switch (ap_mcs) { default: if (our_mcs <= ap_mcs) @@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; /* + * Stop timers before deleting work items, as timers + * could race and re-add the work-items. They will be + * re-established on connection. + */ + del_timer_sync(&ifmgd->conn_mon_timer); + del_timer_sync(&ifmgd->bcn_mon_timer); + + /* * we need to use atomic bitops for the running bits * only because both timers might fire at the same * time -- the code here is properly synchronised. @@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) if (del_timer_sync(&ifmgd->timer)) set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); - cancel_work_sync(&ifmgd->chswitch_work); if (del_timer_sync(&ifmgd->chswitch_timer)) set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); - - /* these will just be re-established on connection */ - del_timer_sync(&ifmgd->conn_mon_timer); - del_timer_sync(&ifmgd->bcn_mon_timer); + cancel_work_sync(&ifmgd->chswitch_work); } void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) @@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + /* + * Make sure some work items will not run after this, + * they will not do anything but might not have been + * cancelled when disconnecting. 
+ */ + cancel_work_sync(&ifmgd->monitor_work); + cancel_work_sync(&ifmgd->beacon_connection_loss_work); + cancel_work_sync(&ifmgd->request_smps_work); + cancel_work_sync(&ifmgd->csa_connection_drop_work); + cancel_work_sync(&ifmgd->chswitch_work); + mutex_lock(&ifmgd->mtx); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, false); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ce78d1149f1d..8914d2d2881a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2745,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, cpu_to_le16(IEEE80211_FCTL_MOREDATA); } - sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, skb)) break; dev_kfree_skb_any(skb); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a9740bd6fe54..94b4b9853f60 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, { const struct nf_conn_help *help; const struct nf_conntrack_helper *helper; + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; /* Called from the helper function, this call never fails */ help = nfct_help(ct); @@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, helper = rcu_dereference(help->helper); nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, - "nf_ct_%s: dropping packet: %s ", helper->name, fmt); + "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); + + va_end(args); } EXPORT_SYMBOL_GPL(nf_ct_helper_log); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index d578ec251712..0b1b32cda307 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id) } EXPORT_SYMBOL_GPL(nfnl_unlock); -static struct mutex *nfnl_get_lock(__u8 subsys_id) -{ - return &table[subsys_id].mutex; -} - int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) { nfnl_lock(n->subsys_id); @@ -199,7 +194,7 @@ replay: rcu_read_unlock(); nfnl_lock(subsys_id); if (rcu_dereference_protected(table[subsys_id].subsys, - lockdep_is_held(nfnl_get_lock(subsys_id))) != ss || + lockdep_is_held(&table[subsys_id].mutex)) != ss || nfnetlink_find_client(type, ss) != nc) err = -EAGAIN; else if (nc->call) diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index ba92824086f3..3228d7f24eb4 100644 --- a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c @@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par) const struct xt_audit_info *info = par->targinfo; struct audit_buffer *ab; + if (audit_enabled == 0) + goto errout; + ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); if (ab == NULL) goto errout; diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 847d495cd4de..8a6c6ea466d8 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, struct netlbl_unlhsh_walk_arg cb_arg; u32 skip_bkt = cb->args[0]; u32 skip_chain = cb->args[1]; - u32 skip_addr4 = cb->args[2]; - u32 skip_addr6 = cb->args[3]; u32 iter_bkt; u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; struct netlbl_unlhsh_iface *iface; @@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff 
*skb, continue; netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { - if (iter_addr4++ < skip_addr4) + if (iter_addr4++ < cb->args[2]) continue; if (netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { - if (iter_addr6++ < skip_addr6) + if (iter_addr6++ < cb->args[3]) continue; if (netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, unlabel_staticlist_return: rcu_read_unlock(); - cb->args[0] = skip_bkt; - cb->args[1] = skip_chain; - cb->args[2] = skip_addr4; - cb->args[3] = skip_addr6; + cb->args[0] = iter_bkt; + cb->args[1] = iter_chain; + cb->args[2] = iter_addr4; + cb->args[3] = iter_addr6; return skb->len; } @@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, { struct netlbl_unlhsh_walk_arg cb_arg; struct netlbl_unlhsh_iface *iface; - u32 skip_addr4 = cb->args[0]; - u32 skip_addr6 = cb->args[1]; - u32 iter_addr4 = 0; + u32 iter_addr4 = 0, iter_addr6 = 0; struct netlbl_af4list *addr4; #if IS_ENABLED(CONFIG_IPV6) - u32 iter_addr6 = 0; struct netlbl_af6list *addr6; #endif @@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, goto unlabel_staticlistdef_return; netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { - if (iter_addr4++ < skip_addr4) + if (iter_addr4++ < cb->args[0]) continue; if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, iface, @@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, } #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { - if (iter_addr6++ < skip_addr6) + if (iter_addr6++ < cb->args[1]) continue; if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, iface, @@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, unlabel_staticlistdef_return: rcu_read_unlock(); - cb->args[0] = skip_addr4; - cb->args[1] = skip_addr6; + cb->args[0] = iter_addr4; + cb->args[1] = iter_addr6; return skb->len; } diff --git a/net/rds/stats.c b/net/rds/stats.c index 7be790d60b90..73be187d389e 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c @@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, for (i = 0; i < nr; i++) { BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); + ctr.name[sizeof(ctr.name) - 1] = '\0'; ctr.value = values[i]; rds_info_copy(iter, &ctr, sizeof(ctr)); diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e9a77f621c3d..d51852bba01c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, new_num_classes == q->max_agg_classes - 1) /* agg no more full */ hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); + /* The next assignment may let + * agg->initial_budget > agg->budgetmax + * hold, we will take it into account in charge_actual_service(). 
+ */ agg->budgetmax = new_num_classes * agg->lmax; new_agg_weight = agg->class_weight * new_num_classes; agg->inv_w = ONE_FP/new_agg_weight; @@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q) unsigned long old_vslot = q->oldV >> q->min_slot_shift; if (vslot != old_vslot) { - unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; + unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1; qfq_move_groups(q, mask, IR, ER); qfq_move_groups(q, mask, IB, EB); } @@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, /* Update F according to the actual service received by the aggregate. */ static inline void charge_actual_service(struct qfq_aggregate *agg) { - /* compute the service received by the aggregate */ - u32 service_received = agg->initial_budget - agg->budget; + /* Compute the service received by the aggregate, taking into + * account that, after decreasing the number of classes in + * agg, it may happen that + * agg->initial_budget - agg->budget > agg->bugdetmax + */ + u32 service_received = min(agg->budgetmax, + agg->initial_budget - agg->budget); agg->F = agg->S + (u64)service_received * agg->inv_w; } +static inline void qfq_update_agg_ts(struct qfq_sched *q, + struct qfq_aggregate *agg, + enum update_reason reason); + +static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); + static struct sk_buff *qfq_dequeue(struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); @@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) in_serv_agg->initial_budget = in_serv_agg->budget = in_serv_agg->budgetmax; - if (!list_empty(&in_serv_agg->active)) + if (!list_empty(&in_serv_agg->active)) { /* * Still active: reschedule for * service. Possible optimization: if no other @@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) * handle it, we would need to maintain an * extra num_active_aggs field. */ - qfq_activate_agg(q, in_serv_agg, requeue); - else if (sch->q.qlen == 0) { /* no aggregate to serve */ + qfq_update_agg_ts(q, in_serv_agg, requeue); + qfq_schedule_agg(q, in_serv_agg); + } else if (sch->q.qlen == 0) { /* no aggregate to serve */ q->in_serv_agg = NULL; return NULL; } @@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) qdisc_bstats_update(sch, skb); agg_dequeue(in_serv_agg, cl, len); - in_serv_agg->budget -= len; + /* If lmax is lowered, through qfq_change_class, for a class + * owning pending packets with larger size than the new value + * of lmax, then the following condition may hold. 
+ */ + if (unlikely(in_serv_agg->budget < len)) + in_serv_agg->budget = 0; + else + in_serv_agg->budget -= len; + q->V += (u64)len * IWSUM; pr_debug("qfq dequeue: len %u F %lld now %lld\n", len, (unsigned long long) in_serv_agg->F, @@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl->deficit = agg->lmax; list_add_tail(&cl->alist, &agg->active); - if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) - return err; /* aggregate was not empty, nothing else to do */ + if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || + q->in_serv_agg == agg) + return err; /* non-empty or in service, nothing else to do */ - /* recharge budget */ - agg->initial_budget = agg->budget = agg->budgetmax; - - qfq_update_agg_ts(q, agg, enqueue); - if (q->in_serv_agg == NULL) - q->in_serv_agg = agg; - else if (agg != q->in_serv_agg) - qfq_schedule_agg(q, agg); + qfq_activate_agg(q, agg, enqueue); return err; } @@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) /* group was surely ineligible, remove */ __clear_bit(grp->index, &q->bitmaps[IR]); __clear_bit(grp->index, &q->bitmaps[IB]); - } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) + } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && + q->in_serv_agg == NULL) q->V = roundedS; grp->S = roundedS; @@ -1284,8 +1303,15 @@ skip_update: static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, enum update_reason reason) { + agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ + qfq_update_agg_ts(q, agg, reason); - qfq_schedule_agg(q, agg); + if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */ + q->in_serv_agg = agg; /* start serving this aggregate */ + /* update V: to be in service, agg must be eligible */ + q->oldV = q->V = agg->S; + } else if (agg != q->in_serv_agg) + qfq_schedule_agg(q, agg); } static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, @@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) __set_bit(grp->index, &q->bitmaps[s]); } } - - qfq_update_eligible(q); } static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index f7d34e7b6f81..5ead60550895 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd, else { int N, i; + /* + * NOTE: we skip uid_valid()/gid_valid() checks here: + * instead, * -1 id's are later mapped to the + * (export-specific) anonymous id by nfsd_setuser. + * + * (But supplementary gid's get no such special + * treatment so are checked for validity here.) 
+ */ /* uid */ rsci.cred.cr_uid = make_kuid(&init_user_ns, id); - if (!uid_valid(rsci.cred.cr_uid)) - goto out; /* gid */ if (get_int(&mesg, &id)) goto out; rsci.cred.cr_gid = make_kgid(&init_user_ns, id); - if (!gid_valid(rsci.cred.cr_gid)) - goto out; /* number of additional gid's */ if (get_int(&mesg, &N)) diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index a0f48a51e14e..a9129f8d7070 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1175,6 +1175,7 @@ static struct file_system_type rpc_pipe_fs_type = { .kill_sb = rpc_kill_sb, }; MODULE_ALIAS_FS("rpc_pipefs"); +MODULE_ALIAS("rpc_pipefs"); static void init_once(void *foo) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c1d8476b7692..3d02130828da 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt) xs_tcp_shutdown(xprt); } +static void xs_local_destroy(struct rpc_xprt *xprt) +{ + xs_close(xprt); + xs_free_peer_addresses(xprt); + xprt_free(xprt); + module_put(THIS_MODULE); +} + /** * xs_destroy - prepare to shutdown a transport * @xprt: doomed transport @@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt) cancel_delayed_work_sync(&transport->connect_worker); - xs_close(xprt); - xs_free_peer_addresses(xprt); - xprt_free(xprt); - module_put(THIS_MODULE); + xs_local_destroy(xprt); } static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) @@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = { .send_request = xs_local_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_def, .close = xs_close, - .destroy = xs_destroy, + .destroy = xs_local_destroy, .print_stats = xs_local_print_stats, }; diff --git a/net/wireless/core.c b/net/wireless/core.c index 5ffff039b017..ea4155fe9733 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) rdev->wiphy.rts_threshold = (u32) -1; rdev->wiphy.coverage_class = 0; - rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | - NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; + rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; return &rdev->wiphy; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e652d05ff712..d44ab216c0ec 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, if ((chan->flags & IEEE80211_CHAN_RADAR) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) - goto nla_put_failure; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, DBM_TO_MBM(chan->max_power))) @@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag dev->wiphy.max_acl_mac_addrs)) goto nla_put_failure; - if (dev->wiphy.extended_capabilities && - (nla_put(msg, NL80211_ATTR_EXT_CAPA, - dev->wiphy.extended_capabilities_len, - dev->wiphy.extended_capabilities) || - nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, - 
dev->wiphy.extended_capabilities_len, - dev->wiphy.extended_capabilities_mask))) - goto nla_put_failure; - return genlmsg_end(msg, hdr); nla_put_failure: @@ -1328,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) { - int idx = 0; + int idx = 0, ret; int start = cb->args[0]; struct cfg80211_registered_device *dev; @@ -1338,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) continue; if (++idx <= start) continue; - if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - dev) < 0) { + ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + dev); + if (ret < 0) { + /* + * If sending the wiphy data didn't fit (ENOBUFS or + * EMSGSIZE returned), this SKB is still empty (so + * it's not too big because another wiphy dataset is + * already in the skb) and we've not tried to adjust + * the dump allocation yet ... then adjust the alloc + * size to be bigger, and return 1 but with the empty + * skb. This results in an empty message being RX'ed + * in userspace, but that is ignored. + * + * We can then retry with the larger buffer. + */ + if ((ret == -ENOBUFS || ret == -EMSGSIZE) && + !skb->len && + cb->min_dump_alloc < 4096) { + cb->min_dump_alloc = 4096; + mutex_unlock(&cfg80211_mutex); + return 1; + } idx--; break; } @@ -1357,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; struct cfg80211_registered_device *dev = info->user_ptr[0]; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(4096, GFP_KERNEL); if (!msg) return -ENOMEM; |