author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 16:30:30 +0900
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 16:30:30 +0900
commit    5e30025a319910695f5010dc0fb53a23299da14d (patch)
tree      4292bcf78de221c7de1774ccf5ad0ac5a9315c26 /net
parent    7971e23a66c94f1b9bd2d64a3e86dfbfa8c60121 (diff)
parent    90d3839b90fe379557dae4a44735a6af78f42885 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:
 "The biggest changes:

  - add lockdep support for seqcount/seqlocks structures, this
    unearthed both bugs and required extra annotation.

  - move the various kernel locking primitives to the new
    kernel/locking/ directory"

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  block: Use u64_stats_init() to initialize seqcounts
  locking/lockdep: Mark __lockdep_count_forward_deps() as static
  lockdep/proc: Fix lock-time avg computation
  locking/doc: Update references to kernel/mutex.c
  ipv6: Fix possible ipv6 seqlock deadlock
  cpuset: Fix potential deadlock w/ set_mems_allowed
  seqcount: Add lockdep functionality to seqcount/seqlock structures
  net: Explicitly initialize u64_stats_sync structures for lockdep
  locking: Move the percpu-rwsem code to kernel/locking/
  locking: Move the lglocks code to kernel/locking/
  locking: Move the rwsem code to kernel/locking/
  locking: Move the rtmutex code to kernel/locking/
  locking: Move the semaphore core to kernel/locking/
  locking: Move the spinlock code to kernel/locking/
  locking: Move the lockdep code to kernel/locking/
  locking: Move the mutex code to kernel/locking/
  hung_task debugging: Add tracepoint to report the hang
  x86/locking/kconfig: Update paravirt spinlock Kconfig description
  lockstat: Report avg wait and hold times
  lockdep, x86/alternatives: Drop ancient lockdep fixup message
  ...
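Every net/ hunk below follows the same pattern from "net: Explicitly initialize u64_stats_sync structures for lockdep": after a per-CPU stats area is allocated with alloc_percpu(), the code walks every possible CPU and calls u64_stats_init() on the embedded u64_stats_sync so its seqcount has a lockdep class before the first update. A minimal sketch of that pattern, assuming a hypothetical foo_stats structure and foo_alloc_stats() helper that are not part of any driver touched here:

/*
 * Sketch only: "struct foo_stats" and foo_alloc_stats() are made-up
 * names illustrating the initialization pattern repeated throughout
 * this merge; they do not exist in the drivers changed below.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;	/* guards the 64-bit counters on 32-bit SMP */
};

static struct foo_stats __percpu *foo_alloc_stats(void)
{
	struct foo_stats __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct foo_stats);	/* returns zeroed per-CPU memory */
	if (!stats)
		return NULL;

	/*
	 * With lockdep-enabled seqcounts, zeroed memory is no longer a
	 * fully initialized u64_stats_sync: each instance needs an
	 * explicit u64_stats_init() so lockdep has a class to track.
	 */
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);

	return stats;
}

Counter updates are then bracketed with u64_stats_update_begin()/u64_stats_update_end() as before; only the explicit per-instance initialization is new.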
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c              9
-rw-r--r--  net/bridge/br_device.c            7
-rw-r--r--  net/ipv4/af_inet.c               14
-rw-r--r--  net/ipv4/ip_tunnel.c              8
-rw-r--r--  net/ipv6/addrconf.c              14
-rw-r--r--  net/ipv6/af_inet6.c              14
-rw-r--r--  net/ipv6/ip6_gre.c               15
-rw-r--r--  net/ipv6/ip6_output.c             2
-rw-r--r--  net/ipv6/ip6_tunnel.c             7
-rw-r--r--  net/ipv6/sit.c                   15
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c   25
-rw-r--r--  net/openvswitch/datapath.c        6
-rw-r--r--  net/openvswitch/vport.c           8
13 files changed, 138 insertions, 6 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 8db1b985dbf1..762896ebfcf5 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -539,7 +539,7 @@ static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- int subclass = 0;
+ int subclass = 0, i;
netif_carrier_off(dev);
@@ -593,6 +593,13 @@ static int vlan_dev_init(struct net_device *dev)
if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct vlan_pcpu_stats *vlan_stat;
+ vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+ u64_stats_init(&vlan_stat->syncp);
+ }
+
+
return 0;
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e6b7fecb3af1..f00cfd2a0143 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -88,11 +88,18 @@ out:
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
+ int i;
br->stats = alloc_percpu(struct br_cpu_netstats);
if (!br->stats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct br_cpu_netstats *br_dev_stats;
+ br_dev_stats = per_cpu_ptr(br->stats, i);
+ u64_stats_init(&br_dev_stats->syncp);
+ }
+
return 0;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68af9aac91d0..70011e029ac1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1503,6 +1503,7 @@ int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
ptr[0] = __alloc_percpu(mibsize, align);
if (!ptr[0])
return -ENOMEM;
+
#if SNMP_ARRAY_SZ == 2
ptr[1] = __alloc_percpu(mibsize, align);
if (!ptr[1]) {
@@ -1547,6 +1548,8 @@ static const struct net_protocol icmp_protocol = {
static __net_init int ipv4_mib_init_net(struct net *net)
{
+ int i;
+
if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
sizeof(struct tcp_mib),
__alignof__(struct tcp_mib)) < 0)
@@ -1555,6 +1558,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
sizeof(struct ipstats_mib),
__alignof__(struct ipstats_mib)) < 0)
goto err_ip_mib;
+
+ for_each_possible_cpu(i) {
+ struct ipstats_mib *af_inet_stats;
+ af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
+ u64_stats_init(&af_inet_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+ af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
+ u64_stats_init(&af_inet_stats->syncp);
+#endif
+ }
+
if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
sizeof(struct linux_mib),
__alignof__(struct linux_mib)) < 0)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 63a6d6d6b875..caf01176a5e4 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -976,13 +976,19 @@ int ip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- int err;
+ int i, err;
dev->destructor = ip_tunnel_dev_free;
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *ipt_stats;
+ ipt_stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&ipt_stats->syncp);
+ }
+
err = gro_cells_init(&tunnel->gro_cells, dev);
if (err) {
free_percpu(dev->tstats);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 542d09561ed6..5658d9d51637 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -271,10 +271,24 @@ static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
static int snmp6_alloc_dev(struct inet6_dev *idev)
{
+ int i;
+
if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
sizeof(struct ipstats_mib),
__alignof__(struct ipstats_mib)) < 0)
goto err_ip;
+
+ for_each_possible_cpu(i) {
+ struct ipstats_mib *addrconf_stats;
+ addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i);
+ u64_stats_init(&addrconf_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+ addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
+ u64_stats_init(&addrconf_stats->syncp);
+#endif
+ }
+
+
idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
GFP_KERNEL);
if (!idev->stats.icmpv6dev)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 6468bda1f2b9..ff75313f27a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -714,6 +714,8 @@ static void ipv6_packet_cleanup(void)
static int __net_init ipv6_init_mibs(struct net *net)
{
+ int i;
+
if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
sizeof(struct udp_mib),
__alignof__(struct udp_mib)) < 0)
@@ -726,6 +728,18 @@ static int __net_init ipv6_init_mibs(struct net *net)
sizeof(struct ipstats_mib),
__alignof__(struct ipstats_mib)) < 0)
goto err_ip_mib;
+
+ for_each_possible_cpu(i) {
+ struct ipstats_mib *af_inet6_stats;
+ af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i);
+ u64_stats_init(&af_inet6_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+ af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
+ u64_stats_init(&af_inet6_stats->syncp);
+#endif
+ }
+
+
if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
sizeof(struct icmpv6_mib),
__alignof__(struct icmpv6_mib)) < 0)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index bf4a9a084de5..8acb28621f9c 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1252,6 +1252,7 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
static int ip6gre_tunnel_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
+ int i;
tunnel = netdev_priv(dev);
@@ -1269,6 +1270,13 @@ static int ip6gre_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *ip6gre_tunnel_stats;
+ ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&ip6gre_tunnel_stats->syncp);
+ }
+
+
return 0;
}
@@ -1449,6 +1457,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
static int ip6gre_tap_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
+ int i;
tunnel = netdev_priv(dev);
@@ -1462,6 +1471,12 @@ static int ip6gre_tap_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *ip6gre_tap_stats;
+ ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&ip6gre_tap_stats->syncp);
+ }
+
return 0;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5e31a909a2b0..59df872e2f4d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -910,7 +910,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
out_err_release:
if (err == -ENETUNREACH)
- IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
dst_release(*dst);
*dst = NULL;
return err;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 583b77e2f69b..df1fa58528c6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1494,12 +1494,19 @@ static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
+ int i;
t->dev = dev;
t->net = dev_net(dev);
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *ip6_tnl_stats;
+ ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&ip6_tnl_stats->syncp);
+ }
return 0;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3a9038dd818d..bfc6fcea3841 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1320,6 +1320,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
static int ipip6_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ int i;
tunnel->dev = dev;
tunnel->net = dev_net(dev);
@@ -1332,6 +1333,12 @@ static int ipip6_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *ipip6_tunnel_stats;
+ ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&ipip6_tunnel_stats->syncp);
+ }
+
return 0;
}
@@ -1341,6 +1348,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
struct iphdr *iph = &tunnel->parms.iph;
struct net *net = dev_net(dev);
struct sit_net *sitn = net_generic(net, sit_net_id);
+ int i;
tunnel->dev = dev;
tunnel->net = dev_net(dev);
@@ -1354,6 +1362,13 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *ipip6_fb_stats;
+ ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
+ u64_stats_init(&ipip6_fb_stats->syncp);
+ }
+
dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 62786a495cea..1ded5c6d268c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
struct ip_vs_dest **dest_p)
{
struct ip_vs_dest *dest;
- unsigned int atype;
+ unsigned int atype, i;
EnterFunction(2);
@@ -869,6 +869,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
if (!dest->stats.cpustats)
goto err_alloc;
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *ip_vs_dest_stats;
+ ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i);
+ u64_stats_init(&ip_vs_dest_stats->syncp);
+ }
+
dest->af = svc->af;
dest->protocol = svc->protocol;
dest->vaddr = svc->addr;
@@ -1134,7 +1140,7 @@ static int
ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
struct ip_vs_service **svc_p)
{
- int ret = 0;
+ int ret = 0, i;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_pe *pe = NULL;
struct ip_vs_service *svc = NULL;
@@ -1184,6 +1190,13 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
goto out_err;
}
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *ip_vs_stats;
+ ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i);
+ u64_stats_init(&ip_vs_stats->syncp);
+ }
+
+
/* I'm the first user of the service */
atomic_set(&svc->refcnt, 0);
@@ -3780,7 +3793,7 @@ static struct notifier_block ip_vs_dst_notifier = {
int __net_init ip_vs_control_net_init(struct net *net)
{
- int idx;
+ int i, idx;
struct netns_ipvs *ipvs = net_ipvs(net);
/* Initialize rs_table */
@@ -3799,6 +3812,12 @@ int __net_init ip_vs_control_net_init(struct net *net)
if (!ipvs->tot_stats.cpustats)
return -ENOMEM;
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *ipvs_tot_stats;
+ ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i);
+ u64_stats_init(&ipvs_tot_stats->syncp);
+ }
+
spin_lock_init(&ipvs->tot_stats.lock);
proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 1408adc2a2a7..449e0776a2c0 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1199,6 +1199,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto err_destroy_table;
}
+ for_each_possible_cpu(i) {
+ struct dp_stats_percpu *dpath_stats;
+ dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
+ u64_stats_init(&dpath_stats->sync);
+ }
+
dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
GFP_KERNEL);
if (!dp->ports) {
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6f65dbe13812..d830a95f03a4 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -118,6 +118,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
{
struct vport *vport;
size_t alloc_size;
+ int i;
alloc_size = sizeof(struct vport);
if (priv_size) {
@@ -141,6 +142,13 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
return ERR_PTR(-ENOMEM);
}
+ for_each_possible_cpu(i) {
+ struct pcpu_tstats *vport_stats;
+ vport_stats = per_cpu_ptr(vport->percpu_stats, i);
+ u64_stats_init(&vport_stats->syncp);
+ }
+
+
spin_lock_init(&vport->stats_lock);
return vport;