author	David S. Miller <davem@davemloft.net>	2012-07-10 23:56:33 -0700
committer	David S. Miller <davem@davemloft.net>	2012-07-10 23:56:33 -0700
commit	04c9f416e371cff076a8b3279fb213628915d059 (patch)
tree	2b64cb835cbc9d19d2d06f1e7618615d40ada0af /net
parent	c278fa53c123282f753b2264fc62c0e9502a32fa (diff)
parent	c1f5163de417dab01fa9daaf09a74bbb19303f3c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/batman-adv/bridge_loop_avoidance.c
	net/batman-adv/bridge_loop_avoidance.h
	net/batman-adv/soft-interface.c
	net/mac80211/mlme.c

With merge help from Antonio Quartulli (batman-adv) and Stephen
Rothwell (drivers/net/usb/qmi_wwan.c).

The net/mac80211/mlme.c conflict seemed easy enough, accounting for a
conversion to some new tracing macros.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan.c	3
-rw-r--r--	net/batman-adv/bridge_loop_avoidance.c	15
-rw-r--r--	net/batman-adv/bridge_loop_avoidance.h	6
-rw-r--r--	net/batman-adv/soft-interface.c	6
-rw-r--r--	net/core/dev.c	8
-rw-r--r--	net/core/netprio_cgroup.c	7
-rw-r--r--	net/ieee802154/dgram.c	12
-rw-r--r--	net/mac80211/mlme.c	6
-rw-r--r--	net/mac80211/rc80211_minstrel_ht.c	2
-rw-r--r--	net/netfilter/xt_set.c	4
-rw-r--r--	net/nfc/llcp/sock.c	2
-rw-r--r--	net/rxrpc/ar-peer.c	2
-rw-r--r--	net/sched/sch_netem.c	42
13 files changed, 62 insertions, 53 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6089f0cf23b4..9096bcb08132 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
break;
case NETDEV_DOWN:
+ if (dev->features & NETIF_F_HW_VLAN_FILTER)
+ vlan_vid_del(dev, 0);
+
/* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_N_VID; i++) {
vlandev = vlan_group_get_device(grp, i);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 3483e4035cbe..6705d35b17ce 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1381,6 +1381,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
+ * @is_bcast: true if the packet arrived in a broadcast packet type
*
* bla_rx avoidance checks if:
* * we have to race for a claim
@@ -1390,7 +1391,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
*/
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+ bool is_bcast)
{
struct ethhdr *ethhdr;
struct batadv_claim search_claim, *claim = NULL;
@@ -1409,7 +1411,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
/* don't allow broadcasts while requests are in flight */
- if (is_multicast_ether_addr(ethhdr->h_dest))
+ if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
goto handled;
memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1435,8 +1437,13 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
}
/* if it is a broadcast ... */
- if (is_multicast_ether_addr(ethhdr->h_dest)) {
- /* ... drop it. the responsible gateway is in charge. */
+ if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+ /* ... drop it. the responsible gateway is in charge.
+ *
+ * We need to check is_bcast because with the gateway
+ * feature, broadcasts (like DHCP requests) may be sent
+ * using a unicast packet type.
+ */
goto handled;
} else {
/* seems the client considers us as its best gateway.
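The change above stops treating every multicast-destined frame as a mesh broadcast: with the gateway feature, a DHCP request can be broadcast at the Ethernet level while travelling in a unicast batman packet type. A minimal standalone sketch of the resulting drop decision, with illustrative helper names rather than the batman-adv API:

	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors the is_multicast_ether_addr() test: the low-order bit of
	 * the first destination octet marks a multicast or broadcast MAC. */
	static bool eth_dest_is_multicast(const uint8_t dest[6])
	{
		return dest[0] & 0x01;
	}

	/* Drop only when the frame is multicast on the wire AND arrived in
	 * a broadcast-type mesh packet; gateway-mode DHCP requests come in
	 * unicast packets and must be passed up to the soft interface. */
	static bool should_drop_duplicate(const uint8_t dest[6], bool is_bcast)
	{
		return eth_dest_is_multicast(dest) && is_bcast;
	}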
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 08d13cb1e3df..563cfbf94a7f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -21,7 +21,8 @@
#define _NET_BATMAN_ADV_BLA_H_
#ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+ bool is_bcast);
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
int batadv_bla_is_backbone_gw(struct sk_buff *skb,
struct batadv_orig_node *orig_node, int hdr_size);
@@ -40,7 +41,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
#else /* ifdef CONFIG_BATMAN_ADV_BLA */
static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
- struct sk_buff *skb, short vid)
+ struct sk_buff *skb, short vid,
+ bool is_bcast)
{
return 0;
}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9e4bb61301ec..109ea2aae96c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -267,8 +267,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
+ struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
short vid __maybe_unused = -1;
__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+ bool is_bcast;
+
+ is_bcast = (batadv_header->packet_type == BATADV_BCAST);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -315,7 +319,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
/* Let the bridge loop avoidance check the packet. If it will
 * not handle it, we can safely push it up.
*/
- if (batadv_bla_rx(bat_priv, skb, vid))
+ if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
goto out;
netif_rx(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 5ab6f4b37c0c..73e87c7b4377 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2457,8 +2457,12 @@ static void skb_update_prio(struct sk_buff *skb)
{
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
- if ((!skb->priority) && (skb->sk) && map)
- skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+ if (!skb->priority && skb->sk && map) {
+ unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
+ }
}
#else
#define skb_update_prio(skb)
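The skb_update_prio() fix guards the priomap lookup with the map's length: a socket can carry a cgroup index allocated after the per-device map was last sized, so indexing blindly read past the allocation. A self-contained sketch of the bounds-checked lookup, with illustrative types:

	#include <stddef.h>
	#include <stdint.h>

	struct prio_map {
		size_t len;        /* number of valid entries */
		uint32_t prio[];   /* flexible array member, len entries */
	};

	/* Fall back to priority 0 when the index lies beyond the map: the
	 * map may predate the allocation of higher cgroup indices. */
	static uint32_t lookup_prio(const struct prio_map *map, size_t idx)
	{
		if (map && idx < map->len)
			return map->prio[idx];
		return 0;
	}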
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5b8aa2fae48b..3e953eaddbfc 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
return -ENOSPC;
}
set_bit(prioidx, prioidx_map);
+ if (atomic_read(&max_prioidx) < prioidx)
+ atomic_set(&max_prioidx, prioidx);
spin_unlock_irqrestore(&prioidx_map_lock, flags);
- atomic_set(&max_prioidx, prioidx);
*prio = prioidx;
return 0;
}
@@ -141,7 +142,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
rtnl_lock();
for_each_netdev(&init_net, dev) {
map = rtnl_dereference(dev->priomap);
- if (map)
+ if (map && cs->prioidx < map->priomap_len)
map->priomap[cs->prioidx] = 0;
}
rtnl_unlock();
@@ -165,7 +166,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
map = rcu_dereference(dev->priomap);
- priority = map ? map->priomap[prioidx] : 0;
+ priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
cb->fill(cb, dev->name, priority);
}
rcu_read_unlock();
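Besides adding the same bounds checks to the cgroup destroy and read paths, the hunk above moves the max_prioidx update inside the spinlock that protects the allocation bitmap; publishing it after unlock let a slower writer overwrite a larger maximum with a stale, smaller one. A userspace sketch of the corrected pattern, with pthreads standing in for the kernel spinlock:

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint64_t idx_bitmap;   /* bit i set => index i in use */
	static int max_idx = -1;

	/* Allocate the lowest free index and update the maximum while
	 * still holding the lock, so concurrent allocators cannot reorder
	 * the writes and shrink max_idx. */
	static int get_idx(int *out)
	{
		pthread_mutex_lock(&idx_lock);
		for (int i = 0; i < 64; i++) {
			if (!(idx_bitmap & ((uint64_t)1 << i))) {
				idx_bitmap |= (uint64_t)1 << i;
				if (max_idx < i)
					max_idx = i;
				pthread_mutex_unlock(&idx_lock);
				*out = i;
				return 0;
			}
		}
		pthread_mutex_unlock(&idx_lock);
		return -1;   /* analogous to -ENOSPC */
	}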
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 6fbb2ad7bb6d..16705611589a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
mtu = dev->mtu;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
+ if (size > mtu) {
+ pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+ err = -EINVAL;
+ goto out_dev;
+ }
+
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
if (err < 0)
goto out_skb;
- if (size > mtu) {
- pr_debug("size = %Zu, mtu = %u\n", size, mtu);
- err = -EINVAL;
- goto out_skb;
- }
-
skb->dev = dev;
skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
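The dgram_sendmsg() change is purely an ordering fix: the size-versus-MTU check moves ahead of sock_alloc_send_skb(), so an oversized datagram is rejected before a buffer is allocated and then unwound. A hypothetical sketch of the reordered path (names are illustrative):

	#include <errno.h>
	#include <stddef.h>

	/* Validate before allocating: the error path then has nothing to
	 * free, and we never build a buffer we are about to discard. */
	static int dgram_send_sketch(size_t size, unsigned int mtu)
	{
		if (size > mtu)
			return -EINVAL;   /* fail fast, nothing to unwind */

		/* ... allocate the buffer and construct the frame only
		 * after the check has passed ... */
		return 0;
	}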
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e6fe84a08443..aa69a331f374 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2152,15 +2152,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
mgmt->sa, status_code);
ieee80211_destroy_assoc_data(sdata, false);
} else {
- sdata_info(sdata, "associated\n");
-
if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
/* oops -- internal error -- send timeout for now */
- ieee80211_destroy_assoc_data(sdata, true);
- sta_info_destroy_addr(sdata, mgmt->bssid);
+ ieee80211_destroy_assoc_data(sdata, false);
cfg80211_put_bss(*bss);
return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
}
+ sdata_info(sdata, "associated\n");
/*
* destroy assoc_data afterwards, as otherwise an idle
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2d1acc6c5445..f9e51ef8dfa2 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
max_rates = sband->n_bitrates;
}
- msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+ msp = kzalloc(sizeof(*msp), gfp);
if (!msp)
return NULL;
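The one-liner above is the common kernel idiom of sizing an allocation from the pointer rather than the type name: if msp's type ever changes, kzalloc(sizeof(*msp), ...) stays correct automatically. A tiny userspace illustration, with calloc standing in for kzalloc:

	#include <stdlib.h>

	struct sta_state { int sample_table[16]; };

	/* sizeof(*msp) tracks the pointee type; sizeof(struct sta_state)
	 * would silently go stale if the pointer were retyped later. */
	static struct sta_state *alloc_sta_state(void)
	{
		struct sta_state *msp = calloc(1, sizeof(*msp));
		return msp;   /* caller must check for NULL */
	}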
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
info->del_set.flags, 0, UINT_MAX);
/* Normalize to fit into jiffies */
- if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+ if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+ add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
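The xt_set fix makes the jiffies-overflow clamp sentinel-aware: the SET target uses a special timeout value to mean "no explicit timeout", and clamping it would silently turn that request into a finite expiry. A standalone sketch of the guarded clamp (constants here are illustrative stand-ins for the ipset definitions):

	#include <stdint.h>

	#define NO_TIMEOUT   UINT32_MAX   /* sentinel: no explicit timeout */
	#define MSEC_PER_SEC 1000U

	/* Clamp only real timeouts so a later seconds-to-jiffies
	 * conversion cannot overflow; the sentinel passes through. */
	static uint32_t normalize_timeout(uint32_t timeout)
	{
		if (timeout != NO_TIMEOUT &&
		    timeout > UINT32_MAX / MSEC_PER_SEC)
			timeout = UINT32_MAX / MSEC_PER_SEC;
		return timeout;
	}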
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 2c0b317344b7..05ca5a680071 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
pr_debug("%p\n", sk);
- if (llcp_sock == NULL)
+ if (llcp_sock == NULL || llcp_sock->dev == NULL)
return -EBADFD;
addr->sa_family = AF_NFC;
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 2754f098d436..bebaa43484bc 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
return peer;
new_UDP_peer:
- _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+ _net("Rx UDP DGRAM from NEW peer");
read_unlock_bh(&rxrpc_peer_lock);
_leave(" = -EBUSY [new]");
return ERR_PTR(-EBUSY);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c2..c412ad0d0308 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
return PSCHED_NS2TICKS(ticks);
}
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
struct sk_buff_head *list = &sch->q;
psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
- struct sk_buff *skb;
-
- if (likely(skb_queue_len(list) < sch->limit)) {
- skb = skb_peek_tail(list);
- /* Optimize for add at tail */
- if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
- return qdisc_enqueue_tail(nskb, sch);
+ struct sk_buff *skb = skb_peek_tail(list);
- skb_queue_reverse_walk(list, skb) {
- if (tnext >= netem_skb_cb(skb)->time_to_send)
- break;
- }
+ /* Optimize for add at tail */
+ if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+ return __skb_queue_tail(list, nskb);
- __skb_queue_after(list, skb, nskb);
- sch->qstats.backlog += qdisc_pkt_len(nskb);
- return NET_XMIT_SUCCESS;
+ skb_queue_reverse_walk(list, skb) {
+ if (tnext >= netem_skb_cb(skb)->time_to_send)
+ break;
}
- return qdisc_reshape_fail(nskb, sch);
+ __skb_queue_after(list, skb, nskb);
}
/*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
struct sk_buff *skb2;
- int ret;
int count = 1;
/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
+ if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+ return qdisc_reshape_fail(skb, sch);
+
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
cb = netem_skb_cb(skb);
if (q->gap == 0 || /* not doing reordering */
q->counter < q->gap - 1 || /* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cb->time_to_send = now + delay;
++q->counter;
- ret = tfifo_enqueue(skb, sch);
+ tfifo_enqueue(skb, sch);
} else {
/*
* Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->counter = 0;
__skb_queue_head(&sch->q, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
sch->qstats.requeues++;
- ret = NET_XMIT_SUCCESS;
- }
-
- if (ret != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
- return ret;
- }
}
return NET_XMIT_SUCCESS;
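The netem rework hoists the queue-limit check into netem_enqueue(), before any duplication or corruption side effects, which lets tfifo_enqueue() become a void helper whose only job is keeping the queue sorted by time_to_send. It scans from the tail because timestamps are usually non-decreasing, making the common case an O(1) append. A self-contained sketch of that insertion strategy on a plain doubly-linked list (illustrative types, not the sk_buff API):

	#include <stddef.h>

	struct pkt {
		long long time_to_send;
		struct pkt *prev, *next;
	};

	struct pkt_list {
		struct pkt *head, *tail;
	};

	/* Insert in ascending time_to_send order. Walk backwards from the
	 * tail: only packets delayed out of order pay for the scan. */
	static void tfifo_insert(struct pkt_list *l, struct pkt *n)
	{
		struct pkt *p = l->tail;

		while (p && p->time_to_send > n->time_to_send)
			p = p->prev;

		/* link n after p, or at the head when p is NULL */
		n->prev = p;
		n->next = p ? p->next : l->head;
		if (n->next)
			n->next->prev = n;
		else
			l->tail = n;
		if (p)
			p->next = n;
		else
			l->head = n;
	}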