Diffstat (limited to 'net')
-rw-r--r-- net/8021q/vlan_core.c | 4
-rw-r--r-- net/8021q/vlan_dev.c | 3
-rw-r--r-- net/9p/client.c | 70
-rw-r--r-- net/9p/protocol.c | 8
-rw-r--r-- net/9p/trans_virtio.c | 6
-rw-r--r-- net/bluetooth/bnep/netdev.c | 2
-rw-r--r-- net/bluetooth/hci_conn.c | 5
-rw-r--r-- net/bluetooth/hci_event.c | 2
-rw-r--r-- net/bluetooth/l2cap.c | 14
-rw-r--r-- net/bridge/br_device.c | 9
-rw-r--r-- net/bridge/br_fdb.c | 6
-rw-r--r-- net/bridge/br_forward.c | 27
-rw-r--r-- net/bridge/br_multicast.c | 21
-rw-r--r-- net/bridge/br_netfilter.c | 3
-rw-r--r-- net/bridge/br_sysfs_br.c | 2
-rw-r--r-- net/caif/Kconfig | 5
-rw-r--r-- net/caif/caif_socket.c | 91
-rw-r--r-- net/caif/cfctrl.c | 92
-rw-r--r-- net/caif/cfmuxl.c | 3
-rw-r--r-- net/caif/cfpkt_skbuff.c | 25
-rw-r--r-- net/caif/cfrfml.c | 2
-rw-r--r-- net/caif/cfserl.c | 7
-rw-r--r-- net/caif/cfsrvl.c | 6
-rw-r--r-- net/caif/cfveil.c | 2
-rw-r--r-- net/core/datagram.c | 6
-rw-r--r-- net/core/dev.c | 120
-rw-r--r-- net/core/drop_monitor.c | 12
-rw-r--r-- net/core/ethtool.c | 41
-rw-r--r-- net/core/gen_estimator.c | 15
-rw-r--r-- net/core/neighbour.c | 6
-rw-r--r-- net/core/net-sysfs.c | 63
-rw-r--r-- net/core/net-sysfs.h | 1
-rw-r--r-- net/core/pktgen.c | 2
-rw-r--r-- net/core/rtnetlink.c | 30
-rw-r--r-- net/core/skbuff.c | 89
-rw-r--r-- net/core/sock.c | 52
-rw-r--r-- net/dccp/input.c | 6
-rw-r--r-- net/dccp/options.c | 2
-rw-r--r-- net/dsa/Kconfig | 2
-rw-r--r-- net/ieee802154/wpan-class.c | 7
-rw-r--r-- net/ipv4/Kconfig | 10
-rw-r--r-- net/ipv4/ip_output.c | 9
-rw-r--r-- net/ipv4/ipmr.c | 14
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 2
-rw-r--r-- net/ipv4/syncookies.c | 2
-rw-r--r-- net/ipv4/tcp.c | 1
-rw-r--r-- net/ipv4/tcp_hybla.c | 4
-rw-r--r-- net/ipv4/tcp_input.c | 4
-rw-r--r-- net/ipv4/tcp_ipv4.c | 7
-rw-r--r-- net/ipv4/tcp_output.c | 3
-rw-r--r-- net/ipv4/udp.c | 26
-rw-r--r-- net/ipv4/xfrm4_policy.c | 2
-rw-r--r-- net/ipv6/addrconf.c | 14
-rw-r--r-- net/ipv6/icmp.c | 4
-rw-r--r-- net/ipv6/ip6_output.c | 2
-rw-r--r-- net/ipv6/ip6mr.c | 8
-rw-r--r-- net/ipv6/mcast.c | 5
-rw-r--r-- net/ipv6/mip6.c | 3
-rw-r--r-- net/ipv6/ndisc.c | 2
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 6
-rw-r--r-- net/ipv6/route.c | 2
-rw-r--r-- net/ipv6/udp.c | 5
-rw-r--r-- net/ipv6/xfrm6_policy.c | 2
-rw-r--r-- net/iucv/af_iucv.c | 2
-rw-r--r-- net/iucv/iucv.c | 9
-rw-r--r-- net/mac80211/sta_info.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 4
-rw-r--r-- net/netfilter/x_tables.c | 17
-rw-r--r-- net/netfilter/xt_TEE.c | 4
-rw-r--r-- net/netlink/af_netlink.c | 21
-rw-r--r-- net/phonet/pep.c | 9
-rw-r--r-- net/rds/ib_cm.c | 1
-rw-r--r-- net/rds/iw_cm.c | 1
-rw-r--r-- net/sched/act_mirred.c | 43
-rw-r--r-- net/sched/act_nat.c | 9
-rw-r--r-- net/sched/act_pedit.c | 24
-rw-r--r-- net/sched/cls_cgroup.c | 50
-rw-r--r-- net/sched/cls_u32.c | 49
-rw-r--r-- net/sched/sch_api.c | 14
-rw-r--r-- net/sched/sch_teql.c | 1
-rw-r--r-- net/socket.c | 9
-rw-r--r-- net/sunrpc/cache.c | 13
-rw-r--r-- net/sunrpc/rpc_pipe.c | 18
-rw-r--r-- net/sunrpc/rpcb_clnt.c | 2
-rw-r--r-- net/sunrpc/xprt.c | 5
-rw-r--r-- net/sunrpc/xprtsock.c | 29
-rw-r--r-- net/xfrm/xfrm_output.c | 4
-rw-r--r-- net/xfrm/xfrm_policy.c | 19
89 files changed, 876 insertions(+), 491 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
return NET_RX_DROP;
if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
- goto drop;
+ skb->deliver_no_wcard = 1;
skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
struct sk_buff *p;
if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
- goto drop;
+ skb->deliver_no_wcard = 1;
skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5f..529842677817 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
netif_carrier_off(dev);
/* IFF_BROADCAST|IFF_MULTICAST; ??? */
- dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ IFF_MASTER | IFF_SLAVE);
dev->iflink = real_dev->ifindex;
dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
(1<<__LINK_STATE_DORMANT))) |
diff --git a/net/9p/client.c b/net/9p/client.c
index 0aa79faa9850..37c8da07a80b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1321,7 +1321,8 @@ static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
if (wst->muid)
ret += strlen(wst->muid);
- if (proto_version == p9_proto_2000u) {
+ if ((proto_version == p9_proto_2000u) ||
+ (proto_version == p9_proto_2000L)) {
ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
if (wst->extension)
ret += strlen(wst->extension);
@@ -1364,3 +1365,70 @@ error:
return err;
}
EXPORT_SYMBOL(p9_client_wstat);
+
+int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
+{
+ int err;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+
+ err = 0;
+ clnt = fid->clnt;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
+
+ req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type,
+ &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
+ &sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ p9_free_req(clnt, req);
+ goto error;
+ }
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
+ "blocks %llu bfree %llu bavail %llu files %llu ffree %llu "
+ "fsid %llu namelen %ld\n",
+ fid->fid, (long unsigned int)sb->type, (long int)sb->bsize,
+ sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree,
+ sb->fsid, (long int)sb->namelen);
+
+ p9_free_req(clnt, req);
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_statfs);
+
+int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name)
+{
+ int err;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+
+ err = 0;
+ clnt = fid->clnt;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
+ fid->fid, newdirfid->fid, name);
+
+ req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
+ newdirfid->fid, name);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
+
+ p9_free_req(clnt, req);
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_rename);
+
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index e7541d5b0118..149f82160130 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -341,7 +341,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case '?':
- if (proto_version != p9_proto_2000u)
+ if ((proto_version != p9_proto_2000u) &&
+ (proto_version != p9_proto_2000L))
return 0;
break;
default:
@@ -393,7 +394,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
const char *sptr = va_arg(ap, const char *);
int16_t len = 0;
if (sptr)
- len = MIN(strlen(sptr), USHORT_MAX);
+ len = MIN(strlen(sptr), USHRT_MAX);
errcode = p9pdu_writef(pdu, proto_version,
"w", len);
@@ -488,7 +489,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case '?':
- if (proto_version != p9_proto_2000u)
+ if ((proto_version != p9_proto_2000u) &&
+ (proto_version != p9_proto_2000L))
return 0;
break;
default:
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 7eb78ecc1618..dcfbe99ff81c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq)
P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
- while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
+ while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
req->status = REQ_STATUS_SENT;
- if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
+ if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
P9_DPRINTK(P9_DEBUG_TRANS,
"9p debug: virtio rpc add_buf returned failure");
return -EIO;
}
- chan->vq->vq_ops->kick(chan->vq);
+ virtqueue_kick(chan->vq);
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
return 0;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 0faad5ce6dc4..8c100c9dae28 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -104,6 +104,8 @@ static void bnep_net_set_mc_list(struct net_device *dev)
break;
memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+
+ i++;
}
r->len = htons(skb->len - len);
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b10e3cdb08f8..800b6b9fbbae 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -358,6 +358,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
acl->sec_level = sec_level;
acl->auth_type = auth_type;
hci_acl_connect(acl);
+ } else {
+ if (acl->sec_level < sec_level)
+ acl->sec_level = sec_level;
+ if (acl->auth_type < auth_type)
+ acl->auth_type = auth_type;
}
if (type == ACL_LINK)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6c57fc71c7e2..786b5de0bac4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1049,6 +1049,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
if (conn) {
if (!ev->status)
conn->link_mode |= HCI_LM_AUTH;
+ else
+ conn->sec_level = BT_SECURITY_LOW;
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1b682a5aa061..cf3c4073a8a6 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -401,6 +401,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
l2cap_send_sframe(pi, control);
}
+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+ return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
static void l2cap_do_start(struct sock *sk)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +414,13 @@ static void l2cap_do_start(struct sock *sk)
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
- if (l2cap_check_security(sk)) {
+ if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
@@ -464,12 +470,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
}
if (sk->sk_state == BT_CONNECT) {
- if (l2cap_check_security(sk)) {
+ if (l2cap_check_security(sk) &&
+ __l2cap_no_conn_pending(sk)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
@@ -2912,7 +2920,6 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
l2cap_pi(sk)->ident = 0;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
@@ -4404,6 +4411,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
req.psm = l2cap_pi(sk)->psm;
l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c94820e..753fc4221f3c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -217,14 +217,6 @@ static bool br_devices_support_netpoll(struct net_bridge *br)
return count != 0 && ret;
}
-static void br_poll_controller(struct net_device *br_dev)
-{
- struct netpoll *np = br_dev->npinfo->netpoll;
-
- if (np->real_dev != br_dev)
- netpoll_poll_dev(np->real_dev);
-}
-
void br_netpoll_cleanup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
@@ -295,7 +287,6 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_cleanup = br_netpoll_cleanup,
- .ndo_poll_controller = br_poll_controller,
#endif
};
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 26637439965b..b01dde35a69e 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,7 @@ void br_fdb_cleanup(unsigned long _data)
{
struct net_bridge *br = (struct net_bridge *)_data;
unsigned long delay = hold_time(br);
- unsigned long next_timer = jiffies + br->forward_delay;
+ unsigned long next_timer = jiffies + br->ageing_time;
int i;
spin_lock_bh(&br->hash_lock);
@@ -149,9 +149,7 @@ void br_fdb_cleanup(unsigned long _data)
}
spin_unlock_bh(&br->hash_lock);
- /* Add HZ/4 to ensure we round the jiffies upwards to be after the next
- * timer, otherwise we might round down and will have no-op run. */
- mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
+ mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
}
/* Completely flush all dynamic entries in forwarding database.*/
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a98ef1393097..595da45f9088 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
kfree_skb(skb);
else {
skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
- netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
- skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
- } else
-#endif
- dev_queue_xmit(skb);
+ dev_queue_xmit(skb);
}
}
@@ -73,23 +66,9 @@ int br_forward_finish(struct sk_buff *skb)
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
-#ifdef CONFIG_NET_POLL_CONTROLLER
- struct net_bridge *br = to->br;
- if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
- struct netpoll *np;
- to->dev->npinfo = skb->dev->npinfo;
- np = skb->dev->npinfo->netpoll;
- np->real_dev = np->dev = to->dev;
- to->dev->priv_flags |= IFF_IN_NETPOLL;
- }
-#endif
skb->dev = to->dev;
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (skb->dev->npinfo)
- skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
}
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -140,10 +119,10 @@ static int deliver_clone(const struct net_bridge_port *prev,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
- struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
-
dev->stats.tx_dropped++;
return -ENOMEM;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9d21d98ae5fa..27ae946363f1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -99,6 +99,15 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
return NULL;
}
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+ struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
+ if (!mdb)
+ return NULL;
+
+ return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+}
+
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
struct net_bridge_mdb_htable *mdb, __be32 dst)
{
@@ -107,7 +116,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
br_dst.u.ip4 = dst;
br_dst.proto = htons(ETH_P_IP);
- return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+ return br_mdb_ip_get(mdb, &br_dst);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -119,23 +128,17 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
ipv6_addr_copy(&br_dst.u.ip6, dst);
br_dst.proto = htons(ETH_P_IPV6);
- return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+ return br_mdb_ip_get(mdb, &br_dst);
}
#endif
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
- struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
-{
- return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
-}
-
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb)
{
struct net_bridge_mdb_htable *mdb = br->mdb;
struct br_ip ip;
- if (!mdb || br->multicast_disabled)
+ if (br->multicast_disabled)
return NULL;
if (BR_INPUT_SKB_CB(skb)->igmp)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..8fb75f89c4aa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -591,6 +591,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
pskb_trim_rcsum(skb, len);
+ /* BUG: Should really parse the IP options here. */
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index dd321e39e621..486b8f3861d2 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -659,7 +659,7 @@ static struct attribute_group bridge_group = {
*
* Returns the number of bytes read.
*/
-static ssize_t brforward_read(struct kobject *kobj,
+static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index cd1daf6008bd..ed651786f16b 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -2,10 +2,8 @@
# CAIF net configurations
#
-#menu "CAIF Support"
-comment "CAIF Support"
menuconfig CAIF
- tristate "Enable CAIF support"
+ tristate "CAIF support"
select CRC_CCITT
default n
---help---
@@ -45,4 +43,3 @@ config CAIF_NETDEV
If unsure say Y.
endif
-#endmenu
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index c3a70c5c893a..3d0e09584fae 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -60,7 +60,7 @@ struct debug_fs_counter {
atomic_t num_rx_flow_off;
atomic_t num_rx_flow_on;
};
-struct debug_fs_counter cnt;
+static struct debug_fs_counter cnt;
#define dbfs_atomic_inc(v) atomic_inc(v)
#define dbfs_atomic_dec(v) atomic_dec(v)
#else
@@ -128,17 +128,17 @@ static void caif_read_unlock(struct sock *sk)
mutex_unlock(&cf_sk->readlock);
}
-int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
/* A quarter of full buffer is used a low water mark */
return cf_sk->sk.sk_rcvbuf / 4;
}
-void caif_flow_ctrl(struct sock *sk, int mode)
+static void caif_flow_ctrl(struct sock *sk, int mode)
{
struct caifsock *cf_sk;
cf_sk = container_of(sk, struct caifsock, sk);
- if (cf_sk->layer.dn)
+ if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}
@@ -146,7 +146,7 @@ void caif_flow_ctrl(struct sock *sk, int mode)
* Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
* not dropped, but CAIF is sending flow off instead.
*/
-int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
int skb_len;
@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
- if (cf_sk->layer.dn)
- cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
- CAIF_MODEMCMD_FLOW_OFF_REQ);
+ dbfs_atomic_inc(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
trace_printk("CAIF: %s():"
" sending flow OFF due to rmem_schedule\n",
__func__);
- if (cf_sk->layer.dn)
- cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
- CAIF_MODEMCMD_FLOW_OFF_REQ);
+ dbfs_atomic_inc(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
skb->dev = NULL;
skb_set_owner_r(skb, sk);
@@ -285,65 +283,51 @@ static void caif_check_flow_release(struct sock *sk)
{
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
- if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
- return;
if (rx_flow_is_on(cf_sk))
return;
if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
dbfs_atomic_inc(&cnt.num_rx_flow_on);
set_rx_flow_on(cf_sk);
- cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
- CAIF_MODEMCMD_FLOW_ON_REQ);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
}
}
+
/*
- * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer
- * has sufficient size.
+ * Copied from unix_dgram_recvmsg, but removed credit checks,
+ * changed locking, address handling and added MSG_TRUNC.
*/
-
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+ struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
- int ret = 0;
- int len;
+ int ret;
+ int copylen;
- if (unlikely(!buf_len))
- return -EINVAL;
+ ret = -EOPNOTSUPP;
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
-
- len = skb->len;
-
- if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) {
- len = buf_len;
- /*
- * Push skb back on receive queue if buffer too small.
- * This has a built-in race where multi-threaded receive
- * may get packet in wrong order, but multiple read does
- * not really guarantee ordered delivery anyway.
- * Let's optimize for speed without taking locks.
- */
-
- skb_queue_head(&sk->sk_receive_queue, skb);
- ret = -EMSGSIZE;
- goto read_error;
+ copylen = skb->len;
+ if (len < copylen) {
+ m->msg_flags |= MSG_TRUNC;
+ copylen = len;
}
- ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+ ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
- goto read_error;
+ goto out_free;
+ ret = (flags & MSG_TRUNC) ? skb->len : copylen;
+out_free:
skb_free_datagram(sk, skb);
-
caif_check_flow_release(sk);
-
- return len;
+ return ret;
read_error:
return ret;
@@ -920,17 +904,17 @@ wait_connect:
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
release_sock(sk);
- err = wait_event_interruptible_timeout(*sk_sleep(sk),
+ err = -ERESTARTSYS;
+ timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
sk->sk_state != CAIF_CONNECTING,
timeo);
lock_sock(sk);
- if (err < 0)
+ if (timeo < 0)
goto out; /* -ERESTARTSYS */
- if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
- err = -ETIMEDOUT;
- goto out;
- }
+ err = -ETIMEDOUT;
+ if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
+ goto out;
if (sk->sk_state != CAIF_CONNECTED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk);
@@ -945,7 +929,6 @@ out:
return err;
}
-
/*
* caif_release() - Disconnect a CAIF Socket
* Copied and modified af_irda.c:irda_release().
@@ -1019,10 +1002,6 @@ static unsigned int caif_poll(struct file *file,
(sk->sk_shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
- /* Connection-based need to check for termination and startup */
- if (sk->sk_state == CAIF_DISCONNECTED)
- mask |= POLLHUP;
-
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
@@ -1194,7 +1173,7 @@ static struct net_proto_family caif_family_ops = {
.owner = THIS_MODULE,
};
-int af_caif_init(void)
+static int af_caif_init(void)
{
int err = sock_register(&caif_family_ops);
if (!err)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 0ffe1e1ce901..fcfda98a5e6d 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -44,13 +44,14 @@ struct cflayer *cfctrl_create(void)
dev_info.id = 0xff;
memset(this, 0, sizeof(*this));
cfsrvl_init(&this->serv, 0, &dev_info);
- spin_lock_init(&this->info_list_lock);
atomic_set(&this->req_seq_no, 1);
atomic_set(&this->rsp_seq_no, 1);
this->serv.layer.receive = cfctrl_recv;
sprintf(this->serv.layer.name, "ctrl");
this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
spin_lock_init(&this->loop_linkid_lock);
+ spin_lock_init(&this->info_list_lock);
+ INIT_LIST_HEAD(&this->list);
this->loop_linkid = 1;
return &this->serv.layer;
}
@@ -112,20 +113,10 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
void cfctrl_insert_req(struct cfctrl *ctrl,
struct cfctrl_request_info *req)
{
- struct cfctrl_request_info *p;
spin_lock(&ctrl->info_list_lock);
- req->next = NULL;
atomic_inc(&ctrl->req_seq_no);
req->sequence_no = atomic_read(&ctrl->req_seq_no);
- if (ctrl->first_req == NULL) {
- ctrl->first_req = req;
- spin_unlock(&ctrl->info_list_lock);
- return;
- }
- p = ctrl->first_req;
- while (p->next != NULL)
- p = p->next;
- p->next = req;
+ list_add_tail(&req->list, &ctrl->list);
spin_unlock(&ctrl->info_list_lock);
}
@@ -133,46 +124,28 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
struct cfctrl_request_info *req)
{
- struct cfctrl_request_info *p;
- struct cfctrl_request_info *ret;
+ struct cfctrl_request_info *p, *tmp, *first;
spin_lock(&ctrl->info_list_lock);
- if (ctrl->first_req == NULL) {
- spin_unlock(&ctrl->info_list_lock);
- return NULL;
- }
-
- if (cfctrl_req_eq(req, ctrl->first_req)) {
- ret = ctrl->first_req;
- caif_assert(ctrl->first_req);
- atomic_set(&ctrl->rsp_seq_no,
- ctrl->first_req->sequence_no);
- ctrl->first_req = ctrl->first_req->next;
- spin_unlock(&ctrl->info_list_lock);
- return ret;
- }
+ first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
- p = ctrl->first_req;
-
- while (p->next != NULL) {
- if (cfctrl_req_eq(req, p->next)) {
- pr_warning("CAIF: %s(): Requests are not "
+ list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+ if (cfctrl_req_eq(req, p)) {
+ if (p != first)
+ pr_warning("CAIF: %s(): Requests are not "
"received in order\n",
__func__);
- ret = p->next;
+
atomic_set(&ctrl->rsp_seq_no,
- p->next->sequence_no);
- p->next = p->next->next;
- spin_unlock(&ctrl->info_list_lock);
- return ret;
+ p->sequence_no);
+ list_del(&p->list);
+ goto out;
}
- p = p->next;
}
+ p = NULL;
+out:
spin_unlock(&ctrl->info_list_lock);
-
- pr_warning("CAIF: %s(): Request does not match\n",
- __func__);
- return NULL;
+ return p;
}
struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
@@ -388,31 +361,18 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
{
- struct cfctrl_request_info *p, *req;
+ struct cfctrl_request_info *p, *tmp;
struct cfctrl *ctrl = container_obj(layr);
spin_lock(&ctrl->info_list_lock);
-
- if (ctrl->first_req == NULL) {
- spin_unlock(&ctrl->info_list_lock);
- return;
- }
-
- if (ctrl->first_req->client_layer == adap_layer) {
-
- req = ctrl->first_req;
- ctrl->first_req = ctrl->first_req->next;
- kfree(req);
- }
-
- p = ctrl->first_req;
- while (p != NULL && p->next != NULL) {
- if (p->next->client_layer == adap_layer) {
-
- req = p->next;
- p->next = p->next->next;
- kfree(p->next);
+ pr_warning("CAIF: %s(): enter\n", __func__);
+
+ list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+ if (p->client_layer == adap_layer) {
+ pr_warning("CAIF: %s(): cancel req :%d\n", __func__,
+ p->sequence_no);
+ list_del(&p->list);
+ kfree(p);
}
- p = p->next;
}
spin_unlock(&ctrl->info_list_lock);
@@ -634,7 +594,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
case CAIF_CTRLCMD_FLOW_OFF_IND:
spin_lock(&this->info_list_lock);
- if (this->first_req != NULL) {
+ if (!list_empty(&this->list)) {
pr_debug("CAIF: %s(): Received flow off in "
"control layer", __func__);
}
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 7372f27f1d32..80c8d332b258 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -174,10 +174,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
spin_lock(&muxl->receive_lock);
up = get_up(muxl, id);
if (up == NULL)
- return NULL;
+ goto out;
memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
list_del(&up->node);
cfsrvl_put(up);
+out:
spin_unlock(&muxl->receive_lock);
return up;
}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 83fff2ff6658..a6fdf899741a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -238,6 +238,7 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
struct sk_buff *lastskb;
u8 *to;
const u8 *data = data2;
+ int ret;
if (unlikely(is_erronous(pkt)))
return -EPROTO;
if (unlikely(skb_headroom(skb) < len)) {
@@ -246,9 +247,10 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
}
/* Make sure data is writable */
- if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ ret = skb_cow_data(skb, 0, &lastskb);
+ if (unlikely(ret < 0)) {
PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
- return -EPROTO;
+ return ret;
}
to = skb_push(skb, len);
@@ -316,6 +318,8 @@ EXPORT_SYMBOL(cfpkt_setlen);
struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
{
struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+ if (!pkt)
+ return NULL;
if (unlikely(data != NULL))
cfpkt_add_body(pkt, data, len);
return pkt;
@@ -344,12 +348,13 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
if (dst->tail + neededtailspace > dst->end) {
/* Create a dumplicate of 'dst' with more tail space */
+ struct cfpkt *tmppkt;
dstlen = skb_headlen(dst);
createlen = dstlen + neededtailspace;
- tmp = pkt_to_skb(
- cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
- if (!tmp)
+ tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX);
+ if (tmppkt == NULL)
return NULL;
+ tmp = pkt_to_skb(tmppkt);
skb_set_tail_pointer(tmp, dstlen);
tmp->len = dstlen;
memcpy(tmp->data, dst->data, dstlen);
@@ -368,6 +373,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
{
struct sk_buff *skb2;
struct sk_buff *skb = pkt_to_skb(pkt);
+ struct cfpkt *tmppkt;
u8 *split = skb->data + pos;
u16 len2nd = skb_tail_pointer(skb) - split;
@@ -381,9 +387,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
}
/* Create a new packet for the second part of the data */
- skb2 = pkt_to_skb(
- cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
- PKT_PREFIX));
+ tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+ PKT_PREFIX);
+ if (tmppkt == NULL)
+ return NULL;
+ skb2 = pkt_to_skb(tmppkt);
+
if (skb2 == NULL)
return NULL;
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (!cfsrvl_ready(service, &ret))
return ret;
- if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
pr_err("CAIF: %s():Packet too large - size=%d\n",
__func__, cfpkt_getlen(pkt));
return -EOVERFLOW;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 06029ea2da2f..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,14 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
u8 stx = CFSERL_STX;
int ret;
u16 expectlen = 0;
+
caif_assert(newpkt != NULL);
spin_lock(&layr->sync);
if (layr->incomplete_frm != NULL) {
-
layr->incomplete_frm =
cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
pkt = layr->incomplete_frm;
+ if (pkt == NULL) {
+ spin_unlock(&layr->sync);
+ return -ENOMEM;
+ }
} else {
pkt = newpkt;
}
@@ -154,7 +158,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
if (layr->usestx) {
if (tail_pkt != NULL)
pkt = cfpkt_append(pkt, tail_pkt, 0);
-
/* Start search for next STX if frame failed */
continue;
} else {
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index aff31f34528f..6e5b7079a684 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -123,6 +123,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
struct caif_payload_info *info;
u8 flow_off = SRVL_FLOW_OFF;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
pr_err("CAIF: %s(): Packet is erroneous!\n",
__func__);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
return ret;
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
pr_warning("CAIF: %s(): Packet too large - size=%d\n",
__func__, cfpkt_getlen(pkt));
return -EOVERFLOW;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
+ bool slow;
+
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
- lock_sock_bh(sk);
+ slow = lock_sock_fast(sk);
skb_orphan(skb);
sk_mem_reclaim_partial(sk);
- unlock_sock_bh(sk);
+ unlock_sock_fast(sk, slow);
/* skb is now orphaned, can be freed outside of locked section */
__kfree_skb(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 0aab66d68b19..1f466e82ac33 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -954,18 +954,22 @@ int dev_alloc_name(struct net_device *dev, const char *name)
}
EXPORT_SYMBOL(dev_alloc_name);
-static int dev_get_valid_name(struct net *net, const char *name, char *buf,
- bool fmt)
+static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
+ struct net *net;
+
+ BUG_ON(!dev_net(dev));
+ net = dev_net(dev);
+
if (!dev_valid_name(name))
return -EINVAL;
if (fmt && strchr(name, '%'))
- return __dev_alloc_name(net, name, buf);
+ return dev_alloc_name(dev, name);
else if (__dev_get_by_name(net, name))
return -EEXIST;
- else if (buf != name)
- strlcpy(buf, name, IFNAMSIZ);
+ else if (dev->name != name)
+ strlcpy(dev->name, name, IFNAMSIZ);
return 0;
}
@@ -997,20 +1001,15 @@ int dev_change_name(struct net_device *dev, const char *newname)
memcpy(oldname, dev->name, IFNAMSIZ);
- err = dev_get_valid_name(net, newname, dev->name, 1);
+ err = dev_get_valid_name(dev, newname, 1);
if (err < 0)
return err;
rollback:
- /* For now only devices in the initial network namespace
- * are in sysfs.
- */
- if (net_eq(net, &init_net)) {
- ret = device_rename(&dev->dev, dev->name);
- if (ret) {
- memcpy(dev->name, oldname, IFNAMSIZ);
- return ret;
- }
+ ret = device_rename(&dev->dev, dev->name);
+ if (ret) {
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ return ret;
}
write_lock_bh(&dev_base_lock);
@@ -1489,6 +1488,7 @@ static inline void net_timestamp_check(struct sk_buff *skb)
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
skb_orphan(skb);
+ nf_reset(skb);
if (!(dev->flags & IFF_UP) ||
(skb->len > (dev->mtu + dev->hard_header_len))) {
@@ -1554,6 +1554,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+ unsigned int real_num = dev->real_num_tx_queues;
+
+ if (unlikely(txq > dev->num_tx_queues))
+ ;
+ else if (txq > real_num)
+ dev->real_num_tx_queues = txq;
+ else if (txq < real_num) {
+ dev->real_num_tx_queues = txq;
+ qdisc_reset_all_tx_gt(dev, txq);
+ }
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
@@ -1894,8 +1912,16 @@ static int dev_gso_segment(struct sk_buff *skb)
*/
static inline void skb_orphan_try(struct sk_buff *skb)
{
- if (!skb_tx(skb)->flags)
+ struct sock *sk = skb->sk;
+
+ if (sk && !skb_tx(skb)->flags) {
+ /* skb_tx_hash() wont be able to get sk.
+ * We copy sk_hash into skb->rxhash
+ */
+ if (!skb->rxhash)
+ skb->rxhash = sk->sk_hash;
skb_orphan(skb);
+ }
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1981,8 +2007,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
- hash = (__force u16) skb->protocol;
-
+ hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2005,12 +2030,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
- u16 queue_index;
+ int queue_index;
struct sock *sk = skb->sk;
- if (sk_tx_queue_recorded(sk)) {
- queue_index = sk_tx_queue_get(sk);
- } else {
+ queue_index = sk_tx_queue_get(sk);
+ if (queue_index < 0) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue) {
@@ -2254,11 +2278,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->num_rx_queues)) {
- if (net_ratelimit()) {
- pr_warning("%s received packet on queue "
- "%u, but number of RX queues is %u\n",
- dev->name, index, dev->num_rx_queues);
- }
+ WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+ "on queue %u, but number of RX queues is %u\n",
+ dev->name, index, dev->num_rx_queues);
goto done;
}
rxqueue = dev->_rx + index;
@@ -2796,7 +2818,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
struct net_device *orig_dev;
struct net_device *master;
struct net_device *null_or_orig;
- struct net_device *null_or_bond;
+ struct net_device *orig_or_bond;
int ret = NET_RX_DROP;
__be16 type;
@@ -2813,13 +2835,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
if (!skb->skb_iif)
skb->skb_iif = skb->dev->ifindex;
+ /*
+ * bonding note: skbs received on inactive slaves should only
+ * be delivered to pkt handlers that are exact matches. Also
+ * the deliver_no_wcard flag will be set. If packet handlers
+ * are sensitive to duplicate packets these skbs will need to
+ * be dropped at the handler. The vlan accel path may have
+ * already set the deliver_no_wcard flag.
+ */
null_or_orig = NULL;
orig_dev = skb->dev;
master = ACCESS_ONCE(orig_dev->master);
- if (master) {
- if (skb_bond_should_drop(skb, master))
+ if (skb->deliver_no_wcard)
+ null_or_orig = orig_dev;
+ else if (master) {
+ if (skb_bond_should_drop(skb, master)) {
+ skb->deliver_no_wcard = 1;
null_or_orig = orig_dev; /* deliver only exact match */
- else
+ } else
skb->dev = master;
}
@@ -2869,10 +2902,10 @@ ncls:
* device that may have registered for a specific ptype. The
* handler may have to adjust skb->dev and orig_dev.
*/
- null_or_bond = NULL;
+ orig_or_bond = orig_dev;
if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
(vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
- null_or_bond = vlan_dev_real_dev(skb->dev);
+ orig_or_bond = vlan_dev_real_dev(skb->dev);
}
type = skb->protocol;
@@ -2880,7 +2913,7 @@ ncls:
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
if (ptype->type == type && (ptype->dev == null_or_orig ||
ptype->dev == skb->dev || ptype->dev == orig_dev ||
- ptype->dev == null_or_bond)) {
+ ptype->dev == orig_or_bond)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -4965,7 +4998,7 @@ int register_netdevice(struct net_device *dev)
}
}
- ret = dev_get_valid_name(net, dev->name, dev->name, 0);
+ ret = dev_get_valid_name(dev, dev->name, 0);
if (ret)
goto err_uninit;
@@ -4994,8 +5027,6 @@ int register_netdevice(struct net_device *dev)
if (dev->features & NETIF_F_SG)
dev->features |= NETIF_F_GSO;
- netdev_initialize_kobject(dev);
-
ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
ret = notifier_to_errno(ret);
if (ret)
@@ -5547,15 +5578,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
if (dev->features & NETIF_F_NETNS_LOCAL)
goto out;
-#ifdef CONFIG_SYSFS
- /* Don't allow real devices to be moved when sysfs
- * is enabled.
- */
- err = -EINVAL;
- if (dev->dev.parent)
- goto out;
-#endif
-
/* Ensure the device has been registrered */
err = -EINVAL;
if (dev->reg_state != NETREG_REGISTERED)
@@ -5574,7 +5596,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- if (dev_get_valid_name(net, pat, dev->name, 1))
+ if (dev_get_valid_name(dev, pat, 1))
goto out;
}
@@ -5606,8 +5628,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
dev_uc_flush(dev);
dev_mc_flush(dev);
- netdev_unregister_kobject(dev);
-
/* Actually switch the network namespace */
dev_net_set(dev, net);
@@ -5620,7 +5640,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
}
/* Fixup kobjects */
- err = netdev_register_kobject(dev);
+ err = device_rename(&dev->dev, dev->name);
WARN_ON(err);
/* Add the device back in the hashes */
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index cf208d8042b1..ad41529fb60f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -172,12 +172,12 @@ out:
return;
}
-static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
trace_drop_common(skb, location);
}
-static void trace_napi_poll_hit(struct napi_struct *napi)
+static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
{
struct dm_hw_stat_delta *new_stat;
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
switch (state) {
case TRACE_ON:
- rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
- rc |= register_trace_napi_poll(trace_napi_poll_hit);
+ rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+ rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
break;
case TRACE_OFF:
- rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
- rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
+ rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+ rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
tracepoint_synchronize_unregister();
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..75e4ffeb8cc9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -318,23 +318,33 @@ out:
}
static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
- void __user *useraddr)
+ u32 cmd, void __user *useraddr)
{
- struct ethtool_rxnfc cmd;
+ struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
if (!dev->ethtool_ops->set_rxnfc)
return -EOPNOTSUPP;
- if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ /* struct ethtool_rxnfc was originally defined for
+ * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+ * members. User-space might still be using that
+ * definition. */
+ if (cmd == ETHTOOL_SRXFH)
+ info_size = (offsetof(struct ethtool_rxnfc, data) +
+ sizeof(info.data));
+
+ if (copy_from_user(&info, useraddr, info_size))
return -EFAULT;
- return dev->ethtool_ops->set_rxnfc(dev, &cmd);
+ return dev->ethtool_ops->set_rxnfc(dev, &info);
}
static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
- void __user *useraddr)
+ u32 cmd, void __user *useraddr)
{
struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
const struct ethtool_ops *ops = dev->ethtool_ops;
int ret;
void *rule_buf = NULL;
@@ -342,13 +352,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
if (!ops->get_rxnfc)
return -EOPNOTSUPP;
- if (copy_from_user(&info, useraddr, sizeof(info)))
+ /* struct ethtool_rxnfc was originally defined for
+ * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+ * members. User-space might still be using that
+ * definition. */
+ if (cmd == ETHTOOL_GRXFH)
+ info_size = (offsetof(struct ethtool_rxnfc, data) +
+ sizeof(info.data));
+
+ if (copy_from_user(&info, useraddr, info_size))
return -EFAULT;
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
- rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
- GFP_USER);
+ if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
+ rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+ GFP_USER);
if (!rule_buf)
return -ENOMEM;
}
@@ -359,7 +378,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
goto err_out;
ret = -EFAULT;
- if (copy_to_user(useraddr, &info, sizeof(info)))
+ if (copy_to_user(useraddr, &info, info_size))
goto err_out;
if (rule_buf) {
@@ -1516,12 +1535,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
- rc = ethtool_get_rxnfc(dev, useraddr);
+ rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
break;
case ETHTOOL_SRXFH:
case ETHTOOL_SRXCLSRLDEL:
case ETHTOOL_SRXCLSRLINS:
- rc = ethtool_set_rxnfc(dev, useraddr);
+ rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
break;
case ETHTOOL_GGRO:
rc = ethtool_get_gro(dev, useraddr);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
static void est_timer(unsigned long arg)
{
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
*
* Returns 0 on success or a negative error code.
*
- * NOTE: Called under rtnl_mutex
*/
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
+ spin_lock(&est_tree_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
list_add_rcu(&est->list, &elist[idx].list);
gen_add_node(est);
+ spin_unlock(&est_tree_lock);
return 0;
}
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
*
* Removes the rate estimator specified by &bstats and &rate_est.
*
- * NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est)
{
struct gen_estimator *e;
+ spin_lock(&est_tree_lock);
while ((e = gen_find_node(bstats, rate_est))) {
rb_erase(&e->node, &est_root);
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
+ spin_unlock(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est *rate_est)
{
+ bool res;
+
ASSERT_RTNL();
- return gen_find_node(bstats, rate_est) != NULL;
+ spin_lock(&est_tree_lock);
+ res = gen_find_node(bstats, rate_est) != NULL;
+ spin_unlock(&est_tree_lock);
+
+ return res;
}
EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..a4e0a7482c2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(buff);
NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
}
+ skb_dst_force(skb);
__skb_queue_tail(&neigh->arp_queue, skb);
}
rc = 1;
@@ -948,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
{
struct hh_cache *hh;
void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
- = neigh->dev->header_ops->cache_update;
+ = NULL;
+
+ if (neigh->dev->header_ops)
+ update = neigh->dev->header_ops->cache_update;
if (update) {
for (hh = neigh->hh; hh; hh = hh->hh_next) {
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c57c4b228bb5..99e7052d7323 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -14,7 +14,9 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
+#include <linux/nsproxy.h>
#include <net/sock.h>
+#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
@@ -467,6 +469,7 @@ static struct attribute_group wireless_group = {
.attrs = wireless_attrs,
};
#endif
+#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
/*
@@ -766,7 +769,38 @@ static void rx_queue_remove_kobjects(struct net_device *net)
kset_unregister(net->queues_kset);
}
#endif /* CONFIG_RPS */
-#endif /* CONFIG_SYSFS */
+
+static const void *net_current_ns(void)
+{
+ return current->nsproxy->net_ns;
+}
+
+static const void *net_initial_ns(void)
+{
+ return &init_net;
+}
+
+static const void *net_netlink_ns(struct sock *sk)
+{
+ return sock_net(sk);
+}
+
+static struct kobj_ns_type_operations net_ns_type_operations = {
+ .type = KOBJ_NS_TYPE_NET,
+ .current_ns = net_current_ns,
+ .netlink_ns = net_netlink_ns,
+ .initial_ns = net_initial_ns,
+};
+
+static void net_kobj_ns_exit(struct net *net)
+{
+ kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
+}
+
+static struct pernet_operations kobj_net_ops = {
+ .exit = net_kobj_ns_exit,
+};
+
#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
@@ -774,9 +808,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
struct net_device *dev = to_net_dev(d);
int retval;
- if (!net_eq(dev_net(dev), &init_net))
- return 0;
-
/* pass interface to uevent. */
retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
if (retval)
@@ -806,6 +837,13 @@ static void netdev_release(struct device *d)
kfree((char *)dev - dev->padded);
}
+static const void *net_namespace(struct device *d)
+{
+ struct net_device *dev;
+ dev = container_of(d, struct net_device, dev);
+ return dev_net(dev);
+}
+
static struct class net_class = {
.name = "net",
.dev_release = netdev_release,
@@ -815,6 +853,8 @@ static struct class net_class = {
#ifdef CONFIG_HOTPLUG
.dev_uevent = netdev_uevent,
#endif
+ .ns_type = &net_ns_type_operations,
+ .namespace = net_namespace,
};
/* Delete sysfs entries but hold kobject reference until after all
@@ -826,9 +866,6 @@ void netdev_unregister_kobject(struct net_device * net)
kobject_get(&dev->kobj);
- if (!net_eq(dev_net(net), &init_net))
- return;
-
#ifdef CONFIG_RPS
rx_queue_remove_kobjects(net);
#endif
@@ -843,6 +880,7 @@ int netdev_register_kobject(struct net_device *net)
const struct attribute_group **groups = net->sysfs_groups;
int error = 0;
+ device_initialize(dev);
dev->class = &net_class;
dev->platform_data = net;
dev->groups = groups;
@@ -865,9 +903,6 @@ int netdev_register_kobject(struct net_device *net)
#endif
#endif /* CONFIG_SYSFS */
- if (!net_eq(dev_net(net), &init_net))
- return 0;
-
error = device_add(dev);
if (error)
return error;
@@ -896,13 +931,9 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
EXPORT_SYMBOL(netdev_class_create_file);
EXPORT_SYMBOL(netdev_class_remove_file);
-void netdev_initialize_kobject(struct net_device *net)
-{
- struct device *device = &(net->dev);
- device_initialize(device);
-}
-
int netdev_kobject_init(void)
{
+ kobj_ns_type_register(&net_ns_type_operations);
+ register_pernet_subsys(&kobj_net_ops);
return class_register(&net_class);
}
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 14e7524260b3..805555e8b187 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,5 +4,4 @@
int netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
-void netdev_initialize_kobject(struct net_device *);
#endif
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
end_time = ktime_now();
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
- pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+ pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
}
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e4b9870e4706..1a2af24e9e3d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
int num_vfs = dev_num_vf(dev->dev.parent);
- size_t size = nlmsg_total_size(sizeof(struct nlattr));
- size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
- size += num_vfs * (sizeof(struct ifla_vf_mac) +
- sizeof(struct ifla_vf_vlan) +
- sizeof(struct ifla_vf_tx_rate));
+ size_t size = nla_total_size(sizeof(struct nlattr));
+ size += nla_total_size(num_vfs * sizeof(struct nlattr));
+ size += num_vfs *
+ (nla_total_size(sizeof(struct ifla_vf_mac)) +
+ nla_total_size(sizeof(struct ifla_vf_vlan)) +
+ nla_total_size(sizeof(struct ifla_vf_tx_rate)));
return size;
} else
return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
vf_port = nla_nest_start(skb, IFLA_VF_PORT);
- if (!vf_port) {
- nla_nest_cancel(skb, vf_ports);
- return -EMSGSIZE;
- }
+ if (!vf_port)
+ goto nla_put_failure;
NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
+ if (err == -EMSGSIZE)
+ goto nla_put_failure;
if (err) {
-nla_put_failure:
nla_nest_cancel(skb, vf_port);
continue;
}
@@ -739,6 +739,10 @@ nla_put_failure:
nla_nest_end(skb, vf_ports);
return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, vf_ports);
+ return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
if (err) {
nla_nest_cancel(skb, port_self);
- return err;
+ return (err == -EMSGSIZE) ? err : 0;
}
nla_nest_end(skb, port_self);
@@ -1199,8 +1203,10 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr *attr;
int rem;
nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
- if (nla_type(attr) != IFLA_VF_INFO)
+ if (nla_type(attr) != IFLA_VF_INFO) {
+ err = -EINVAL;
goto errout;
+ }
err = do_setvfinfo(dev, attr);
if (err < 0)
goto errout;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4c11000a96aa..ce88293a34e2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
* reference count dropping and cleans up the skbuff as if it
* just came from __alloc_skb().
*/
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
struct skb_shared_info *shinfo;
if (irqs_disabled())
- return 0;
+ return false;
if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
- return 0;
+ return false;
skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
if (skb_end_pointer(skb) - skb->head < skb_size)
- return 0;
+ return false;
if (skb_shared(skb) || skb_cloned(skb))
- return 0;
+ return false;
skb_release_head_state(skb);
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
skb->data = skb->head + NET_SKB_PAD;
skb_reset_tail_pointer(skb);
- return 1;
+ return true;
}
EXPORT_SYMBOL(skb_recycle_check);
@@ -532,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->ip_summed = old->ip_summed;
skb_copy_queue_mapping(new, old);
new->priority = old->priority;
+ new->deliver_no_wcard = old->deliver_no_wcard;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
new->ipvs_property = old->ipvs_property;
#endif
@@ -569,7 +570,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(len);
C(data_len);
C(mac_len);
- C(rxhash);
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb->network_header += off;
if (skb_mac_header_was_set(skb))
skb->mac_header += off;
- skb->csum_start += nhead;
+ /* Only adjust this if it actually is csum_start rather than csum */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ skb->csum_start += nhead;
skb->cloned = 0;
skb->hdr_len = 0;
skb->nohdr = 0;
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
copy_skb_header(n, skb);
off = newheadroom - oldheadroom;
- n->csum_start += off;
+ if (n->ip_summed == CHECKSUM_PARTIAL)
+ n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
n->transport_header += off;
n->network_header += off;
@@ -1406,12 +1409,13 @@ new_page:
/*
* Fill page/offset/length into spd, if it can hold more pages.
*/
-static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+static inline int spd_fill_page(struct splice_pipe_desc *spd,
+ struct pipe_inode_info *pipe, struct page *page,
unsigned int *len, unsigned int offset,
struct sk_buff *skb, int linear,
struct sock *sk)
{
- if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+ if (unlikely(spd->nr_pages == pipe->buffers))
return 1;
if (linear) {
@@ -1447,7 +1451,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
unsigned int *len, struct sk_buff *skb,
struct splice_pipe_desc *spd, int linear,
- struct sock *sk)
+ struct sock *sk,
+ struct pipe_inode_info *pipe)
{
if (!*len)
return 1;
@@ -1470,7 +1475,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
/* the linear region may spread across several pages */
flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
- if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
+ if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
return 1;
__segment_seek(&page, &poff, &plen, flen);
@@ -1485,9 +1490,9 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
* Map linear and fragment data from the skb to spd. It reports failure if the
* pipe is full or if we already spliced the requested length.
*/
-static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
- unsigned int *len, struct splice_pipe_desc *spd,
- struct sock *sk)
+static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+ unsigned int *offset, unsigned int *len,
+ struct splice_pipe_desc *spd, struct sock *sk)
{
int seg;
@@ -1497,7 +1502,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
- offset, len, skb, spd, 1, sk))
+ offset, len, skb, spd, 1, sk, pipe))
return 1;
/*
@@ -1507,7 +1512,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (__splice_segment(f->page, f->page_offset, f->size,
- offset, len, skb, spd, 0, sk))
+ offset, len, skb, spd, 0, sk, pipe))
return 1;
}
@@ -1524,8 +1529,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
unsigned int flags)
{
- struct partial_page partial[PIPE_BUFFERS];
- struct page *pages[PIPE_BUFFERS];
+ struct partial_page partial[PIPE_DEF_BUFFERS];
+ struct page *pages[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
@@ -1535,12 +1540,16 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
};
struct sk_buff *frag_iter;
struct sock *sk = skb->sk;
+ int ret = 0;
+
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
/*
* __skb_splice_bits() only fails if the output has no room left,
* so no point in going over the frag_list for the error case.
*/
- if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
+ if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
goto done;
else if (!tlen)
goto done;
@@ -1551,14 +1560,12 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
skb_walk_frags(skb, frag_iter) {
if (!tlen)
break;
- if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+ if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
break;
}
done:
if (spd.nr_pages) {
- int ret;
-
/*
* Drop the socket lock, otherwise we have reverse
* locking dependencies between sk_lock and i_mutex
@@ -1571,10 +1578,10 @@ done:
release_sock(sk);
ret = splice_to_pipe(pipe, &spd);
lock_sock(sk);
- return ret;
}
- return 0;
+ splice_shrink_spd(pipe, &spd);
+ return ret;
}
/**
@@ -2961,6 +2968,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
}
EXPORT_SYMBOL_GPL(skb_cow_data);
+static void sock_rmem_free(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We don't mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
+ return -ENOMEM;
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_rmem_free;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+ skb_queue_tail(&sk->sk_error_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb->len);
+ return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps)
{
@@ -2992,7 +3027,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
err = sock_queue_err_skb(sk, skb);
+
if (err)
kfree_skb(skb);
}
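The new sock_queue_err_skb() charges skb->truesize to sk_rmem_alloc and installs sock_rmem_free() as the destructor, so the charge is dropped whenever the queued skb is eventually freed; sk_forward_alloc is deliberately left alone. Note the calling convention visible in skb_tstamp_tx() above: on failure the skb is not consumed, so a caller looks roughly like this (the caller name is hypothetical):

	static void example_report_error(struct sock *sk, struct sk_buff *skb)
	{
		if (sock_queue_err_skb(sk, skb))
			kfree_skb(skb);	/* queue refused it; we still own the skb */
	}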
diff --git a/net/core/sock.c b/net/core/sock.c
index bf88a167c8f2..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -123,6 +123,7 @@
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
+#include <net/cls_cgroup.h>
#include <linux/filter.h>
@@ -217,6 +218,11 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
+#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
+int net_cls_subsys_id = -1;
+EXPORT_SYMBOL_GPL(net_cls_subsys_id);
+#endif
+
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
struct timeval tv;
@@ -1050,6 +1056,17 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
module_put(owner);
}
+#ifdef CONFIG_CGROUPS
+void sock_update_classid(struct sock *sk)
+{
+ u32 classid = task_cls_classid(current);
+
+ if (classid && classid != sk->sk_classid)
+ sk->sk_classid = classid;
+}
+EXPORT_SYMBOL(sock_update_classid);
+#endif
+
/**
* sk_alloc - All socket objects are allocated here
* @net: the applicable net namespace
@@ -1073,6 +1090,8 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
sock_lock_init(sk);
sock_net_set(sk, get_net(net));
atomic_set(&sk->sk_wmem_alloc, 1);
+
+ sock_update_classid(sk);
}
return sk;
@@ -1988,6 +2007,39 @@ void release_sock(struct sock *sk)
}
EXPORT_SYMBOL(release_sock);
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small sections, where the process won't block.
+ * Returns false if the fast path was taken:
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ * Returns true if the slow path was taken:
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+bool lock_sock_fast(struct sock *sk)
+{
+ might_sleep();
+ spin_lock_bh(&sk->sk_lock.slock);
+
+ if (!sk->sk_lock.owned)
+ /*
+ * Note: BH stays disabled on the fast path
+ */
+ return false;
+
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+ spin_unlock(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+ local_bh_enable();
+ return true;
+}
+EXPORT_SYMBOL(lock_sock_fast);
+
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
struct timeval tv;
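lock_sock_fast() pairs with an unlock_sock_fast(sk, slow) counterpart added alongside it; the returned bool records which path was taken so the unlock can mirror it (spin_unlock_bh() for the fast path, release_sock() for the slow one). The intended shape, matching the udp.c conversions later in this series:

	bool slow = lock_sock_fast(sk);

	/* short, non-blocking critical section */
	sk_mem_reclaim_partial(sk);

	unlock_sock_fast(sk, slow);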
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 58f7bc156850..6beb6a7d6fba 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -124,9 +124,9 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
return queued;
}
-static u8 dccp_reset_code_convert(const u8 code)
+static u16 dccp_reset_code_convert(const u8 code)
{
- const u8 error_code[] = {
+ const u16 error_code[] = {
[DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */
[DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */
[DCCP_RESET_CODE_ABORTED] = ECONNRESET,
@@ -148,7 +148,7 @@ static u8 dccp_reset_code_convert(const u8 code)
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
- u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
+ u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
sk->sk_err = err;
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 1b08cae9c65b..07395f861d35 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -296,7 +296,7 @@ static inline u8 dccp_ndp_len(const u64 ndp)
{
if (likely(ndp <= 0xFF))
return 1;
- return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
+ return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
}
int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index c51b55400dc5..11201784d29a 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
menuconfig NET_DSA
bool "Distributed Switch Architecture support"
default n
- depends on EXPERIMENTAL && !S390
+ depends on EXPERIMENTAL && NET_ETHERNET && !S390
select PHYLIB
---help---
This allows you to use hardware switch chips that use
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 3d803a1b9fb6..1627ef2e8522 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -147,13 +147,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size,
GFP_KERNEL);
+ if (!phy)
+ goto out;
mutex_lock(&wpan_phy_mutex);
phy->idx = wpan_phy_idx++;
if (unlikely(!wpan_phy_idx_valid(phy->idx))) {
wpan_phy_idx--;
mutex_unlock(&wpan_phy_mutex);
kfree(phy);
- return NULL;
+ goto out;
}
mutex_unlock(&wpan_phy_mutex);
@@ -168,6 +170,9 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
phy->current_page = 0; /* for compatibility */
return phy;
+
+out:
+ return NULL;
}
EXPORT_SYMBOL(wpan_phy_alloc);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e3a1fd938ab..7c3a7d191249 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -303,7 +303,7 @@ config ARPD
If unsure, say N.
config SYN_COOKIES
- bool "IP: TCP syncookie support (disabled per default)"
+ bool "IP: TCP syncookie support"
---help---
Normal TCP/IP networking is open to an attack known as "SYN
flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES
server is really overloaded. If this happens frequently better turn
them off.
- If you say Y here, note that SYN cookies aren't enabled by default;
- you can enable them by saying Y to "/proc file system support" and
+ If you say Y here, you can disable SYN cookies at run time by
+ saying Y to "/proc file system support" and
"Sysctl support" below and executing the command
- echo 1 >/proc/sys/net/ipv4/tcp_syncookies
+ echo 0 > /proc/sys/net/ipv4/tcp_syncookies
- at boot time after the /proc file system has been mounted.
+ after the /proc file system has been mounted.
If unsure, say N.
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9a4a6c96cb0d..041d41df1224 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -873,8 +873,10 @@ int ip_append_data(struct sock *sk,
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
+ skb = skb_peek_tail(&sk->sk_write_queue);
+
inet->cork.length += length;
- if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+ if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->u.dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
@@ -892,7 +894,7 @@ int ip_append_data(struct sock *sk,
* adding appropriate IP header.
*/
- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+ if (!skb)
goto alloc_new_skb;
while (length > 0) {
@@ -1121,7 +1123,8 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
return -EINVAL;
inet->cork.length += size;
- if ((sk->sk_protocol == IPPROTO_UDP) &&
+ if ((size + skb->len > mtu) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
(rt->u.dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 45889103b3e2..7f6273506eea 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
{
struct mr_table *mrt, *next;
- list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+ list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+ list_del(&mrt->list);
kfree(mrt);
+ }
fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
@@ -440,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
int err;
err = ipmr_fib_lookup(net, &fl, &mrt);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
return err;
+ }
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
@@ -1726,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
goto dont_forward;
err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
return err;
+ }
if (!local) {
if (IPCB(skb)->opt.router_alert) {
@@ -1911,7 +1917,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct rtattr *mp_head;
/* If cache is unresolved, don't try to parse IIF and OIF */
- if (c->mfc_parent > MAXVIFS)
+ if (c->mfc_parent >= MAXVIFS)
return -ENOENT;
if (VIF_EXISTS(mrt, c->mfc_parent))
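Both ipmr_rules_exit() here and ip6mr_rules_exit() below depend on the same idiom: list_for_each_entry_safe() caches the next pointer up front, so the current entry can be unlinked and freed mid-walk. Minimal sketch (the list head name is illustrative):

	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &tables, list) {
		list_del(&mrt->list);	/* unlink before freeing */
		kfree(mrt);		/* safe: iterator already saved next */
	}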
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5..4b6c5ca610fc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
cpu = smp_processor_id();
table_base = private->entries[cpu];
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
- stackptr = &private->stackptr[cpu];
+ stackptr = per_cpu_ptr(private->stackptr, cpu);
origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5c24db4a3c91..9f6b22206c52 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
{ .sport = th->dest,
.dport = th->source } } };
security_req_classify_flow(req, &fl);
- if (ip_route_output_key(&init_net, &rt, &fl)) {
+ if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
reqsk_free(req);
goto out;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6596b4feeddc..65afeaec15b7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
ssize_t spliced;
int ret;
+ sock_rps_record_flow(sk);
/*
* We can't seek on a socket input
*/
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index c209e054a634..377bc9349371 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* calculate 2^fract in a <<7 value.
*/
is_slowstart = 1;
- increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
- - 128;
+ increment = ((1 << min(ca->rho, 16U)) *
+ hybla_fraction(rho_fractions)) - 128;
} else {
/*
* congestion avoidance
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
if (sk->sk_family == AF_INET) {
printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
msg,
- &inet->daddr, ntohs(inet->dport),
+ &inet->inet_daddr, ntohs(inet->inet_dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
struct ipv6_pinfo *np = inet6_sk(sk);
printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
msg,
- &np->daddr, ntohs(inet->dport),
+ &np->daddr, ntohs(inet->inet_dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 202cf09c4cd4..fe193e53af44 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ sock_rps_save_rxhash(sk, skb->rxhash);
TCP_CHECK_TIMER(sk);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
}
return 0;
}
- }
+ } else
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
TCP_CHECK_TIMER(sk);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
@@ -1672,8 +1675,6 @@ process:
skb->dev = NULL;
- sock_rps_save_rxhash(sk, skb->rxhash);
-
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957f201a..7ed9dc1042d1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2208,6 +2208,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
int mib_idx;
int fwd_rexmitting = 0;
+ if (!tp->packets_out)
+ return;
+
if (!tp->lost_out)
tp->retransmit_high = tp->snd_una;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9de6a698f91d..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
if (!inet->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
- } else {
+ } else
ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
- }
+
sk->sk_err = err;
sk->sk_error_report(sk);
out:
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
spin_unlock_bh(&rcvq->lock);
if (!skb_queue_empty(&list_kill)) {
- lock_sock_bh(sk);
+ bool slow = lock_sock_fast(sk);
+
__skb_queue_purge(&list_kill);
sk_mem_reclaim_partial(sk);
- unlock_sock_bh(sk);
+ unlock_sock_fast(sk, slow);
}
return res;
}
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
+ bool slow;
/*
* Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
return err;
csum_copy_err:
- lock_sock_bh(sk);
+ slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags))
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- unlock_sock_bh(sk);
+ unlock_sock_fast(sk, slow);
if (noblock)
return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
void udp_destroy_sock(struct sock *sk)
{
- lock_sock_bh(sk);
+ bool slow = lock_sock_fast(sk);
udp_flush_pending_frames(sk);
- unlock_sock_bh(sk);
+ unlock_sock_fast(sk, slow);
}
/*
@@ -1686,8 +1688,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
val = 8;
- else if (val > USHORT_MAX)
- val = USHORT_MAX;
+ else if (val > USHRT_MAX)
+ val = USHRT_MAX;
up->pcslen = val;
up->pcflag |= UDPLITE_SEND_CC;
break;
@@ -1700,8 +1702,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Avoid silly minimal values. */
val = 8;
- else if (val > USHORT_MAX)
- val = USHORT_MAX;
+ else if (val > USHRT_MAX)
+ val = USHRT_MAX;
up->pcrlen = val;
up->pcflag |= UDPLITE_RECV_CC;
break;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..23883a48ebfb 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -108,6 +108,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
memset(fl, 0, sizeof(struct flowi));
+ fl->mark = skb->mark;
+
if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
switch (iph->protocol) {
case IPPROTO_UDP:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e1a698df5706..784f34d11fdd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1760,7 +1760,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
idev = ipv6_find_idev(dev);
if (!idev)
- return NULL;
+ return ERR_PTR(-ENOBUFS);
+
+ if (idev->cnf.disable_ipv6)
+ return ERR_PTR(-EACCES);
/* Add default multicast route */
addrconf_add_mroute(dev);
@@ -2129,8 +2132,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
if (!dev)
return -ENODEV;
- if ((idev = addrconf_add_dev(dev)) == NULL)
- return -ENOBUFS;
+ idev = addrconf_add_dev(dev);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
scope = ipv6_addr_scope(pfx);
@@ -2377,7 +2381,7 @@ static void addrconf_dev_config(struct net_device *dev)
}
idev = addrconf_add_dev(dev);
- if (idev == NULL)
+ if (IS_ERR(idev))
return;
memset(&addr, 0, sizeof(struct in6_addr));
@@ -2468,7 +2472,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
ASSERT_RTNL();
idev = addrconf_add_dev(dev);
- if (!idev) {
+ if (IS_ERR(idev)) {
printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
return;
}
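addrconf_add_dev() now reports why it failed by encoding an errno in the returned pointer; every caller is converted from a NULL check to the IS_ERR()/PTR_ERR() pair. The general shape of the idiom:

	struct inet6_dev *idev = addrconf_add_dev(dev);

	if (IS_ERR(idev))	/* -ENOBUFS or -EACCES, per the hunk above */
		return PTR_ERR(idev);
	/* idev is a valid pointer from here on */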
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce7992982557..03e62f94ff8e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
np->tclass, NULL, &fl, (struct rt6_info*)dst,
MSG_DONTWAIT, np->dontfrag);
if (err) {
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
goto out_put;
}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
np->dontfrag);
if (err) {
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
goto out_put;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd963f64e27c..89425af0684c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bd9e7d3e9c8e..66078dad7fe8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
-#define ip6mr_for_each_table(mrt, met) \
+#define ip6mr_for_each_table(mrt, net) \
list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
{
struct mr6_table *mrt, *next;
- list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+ list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+ list_del(&mrt->list);
ip6mr_free_table(mrt);
+ }
fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
@@ -2017,7 +2019,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
struct rtattr *mp_head;
/* If cache is unresolved, don't try to parse IIF and OIF */
- if (c->mf6c_parent > MAXMIFS)
+ if (c->mf6c_parent >= MAXMIFS)
return -ENOENT;
if (MIF_EXISTS(mrt, c->mf6c_parent))
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c7..ab1622d7d409 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
IPV6_TLV_PADN, 0 };
/* we assume size > sizeof(ra) here */
- skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+ size += LL_ALLOCATED_SPACE(dev);
+ /* limit our allocations to order-0 page */
+ size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+ skb = sock_alloc_send_skb(sk, size, 1, &err);
if (!skb)
return NULL;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 2794b6002836..d6e9599d0705 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =
static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
int err = rt2->rt_hdr.nexthdr;
spin_lock(&x->lock);
- if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+ if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
!ipv6_addr_any((struct in6_addr *)x->coaddr))
err = -ENOENT;
spin_unlock(&x->lock);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0abdc242ddb7..2efef52fb461 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -586,6 +586,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
src_addr = solicited_addr;
if (ifp->flags & IFA_F_OPTIMISTIC)
override = 0;
+ inc_opt |= ifp->idev->cnf.force_tllao;
in6_ifa_put(ifp);
} else {
if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -599,7 +600,6 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
icmp6h.icmp6_solicited = solicited;
icmp6h.icmp6_override = override;
- inc_opt |= ifp->idev->cnf.force_tllao;
__ndisc_send(dev, neigh, daddr, src_addr,
&icmp6h, solicited_addr,
inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd83692..9d2d68f0e605 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
cpu = smp_processor_id();
table_base = private->entries[cpu];
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
- stackptr = &private->stackptr[cpu];
+ stackptr = per_cpu_ptr(private->stackptr, cpu);
origptr = *stackptr;
e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 47d227713758..2933396e0281 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -97,9 +97,11 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
fl.fl_ip_dport = otcph.source;
security_skb_classify_flow(oldskb, &fl);
dst = ip6_route_output(net, NULL, &fl);
- if (dst == NULL)
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
return;
- if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
+ }
+ if (xfrm_lookup(net, &dst, &fl, NULL, 0))
return;
hh_len = (dst->dev->hard_header_len + 15)&~15;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
{
int flags = 0;
- if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+ if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
flags |= RT6_LOOKUP_F_IFACE;
if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3d7a2c0b836a..87be58673b55 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
+ bool slow;
if (addr_len)
*addr_len=sizeof(struct sockaddr_in6);
@@ -424,7 +425,7 @@ out:
return err;
csum_copy_err:
- lock_sock_bh(sk);
+ slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +434,7 @@ csum_copy_err:
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
- unlock_sock_bh(sk);
+ unlock_sock_fast(sk, slow);
if (flags & MSG_DONTWAIT)
return -EAGAIN;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a0e77e14468..6baeabbbca82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,6 +124,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
u8 nexthdr = nh[IP6CB(skb)->nhoff];
memset(fl, 0, sizeof(struct flowi));
+ fl->mark = skb->mark;
+
ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599a752e..9637e45744fa 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
save_message:
save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
if (!save_msg)
- return;
+ goto out_unlock;
save_msg->path = path;
save_msg->msg = *msg;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b28361a64..f28ad2cc8428 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
if (!iucv_irq_data[cpu])
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
+
iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
if (!iucv_param[cpu]) {
kfree(iucv_irq_data[cpu]);
iucv_irq_data[cpu] = NULL;
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
iucv_param[cpu] = NULL;
kfree(iucv_irq_data[cpu]);
iucv_irq_data[cpu] = NULL;
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
break;
case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
cpu_clear(cpu, cpumask);
if (cpus_empty(cpumask))
/* Can't offline last IUCV enabled cpu. */
- return NOTIFY_BAD;
+ return notifier_from_errno(-EINVAL);
smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
if (cpus_empty(iucv_irq_cpumask))
smp_call_function_single(first_cpu(iucv_buffer_cpumask),
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 730197591ab5..ba9360a475b0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -259,7 +259,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
skb_queue_head_init(&sta->tx_filtered);
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
- sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX);
+ sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: Allocated STA %pM\n",
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index d8f7e8ef67b4..ff04e9edbed6 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -162,6 +162,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
@@ -174,6 +175,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
ret = 0;
}
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
@@ -193,6 +195,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
list_del(&cp->c_list);
@@ -202,6 +205,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
} else
ret = 0;
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
vfree(info->jumpstack);
else
kfree(info->jumpstack);
- if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
- vfree(info->stackptr);
- else
- kfree(info->stackptr);
+
+ free_percpu(info->stackptr);
kfree(info);
}
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
unsigned int size;
int cpu;
- size = sizeof(unsigned int) * nr_cpu_ids;
- if (size > PAGE_SIZE)
- i->stackptr = vmalloc(size);
- else
- i->stackptr = kmalloc(size, GFP_KERNEL);
+ i->stackptr = alloc_percpu(unsigned int);
if (i->stackptr == NULL)
return -ENOMEM;
- memset(i->stackptr, 0, size);
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
struct xt_table_info *private;
struct xt_table *t, *table;
- ret = xt_jumpstack_alloc(newinfo);
- if (ret < 0)
- return ERR_PTR(ret);
-
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
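The stackptr conversion drops the hand-rolled kmalloc-or-vmalloc sizing logic in favor of the generic per-CPU allocator. Sketch of the three calls involved (standard percpu API; the local names are illustrative):

	unsigned int __percpu *stackptr;
	unsigned int *origptr;

	stackptr = alloc_percpu(unsigned int);	/* returns zeroed memory */
	if (!stackptr)
		return -ENOMEM;

	/* per-CPU access, as in ipt_do_table()/ip6t_do_table() above: */
	origptr = per_cpu_ptr(stackptr, smp_processor_id());

	free_percpu(stackptr);	/* one call releases every CPU's copy */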
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f49e9..859d9fd429c8 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
if (ip_route_output_key(net, &rt, &fl) != 0)
return false;
- dst_release(skb_dst(skb));
+ skb_dst_drop(skb);
skb_dst_set(skb, &rt->u.dst);
skb->dev = rt->u.dst.dev;
skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
if (dst == NULL)
return false;
- dst_release(skb_dst(skb));
+ skb_dst_drop(skb);
skb_dst_set(skb, dst);
skb->dev = dst->dev;
skb->protocol = htons(ETH_P_IPV6);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6464a1972a69..a2eb965207d3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -978,6 +978,8 @@ struct netlink_broadcast_data {
int delivered;
gfp_t allocation;
struct sk_buff *skb, *skb2;
+ int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
+ void *tx_data;
};
static inline int do_one_broadcast(struct sock *sk,
@@ -1020,6 +1022,9 @@ static inline int do_one_broadcast(struct sock *sk,
p->failure = 1;
if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
p->delivery_failure = 1;
+ } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
+ kfree_skb(p->skb2);
+ p->skb2 = NULL;
} else if (sk_filter(sk, p->skb2)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
@@ -1038,8 +1043,10 @@ out:
return 0;
}
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
- u32 group, gfp_t allocation)
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
+ u32 group, gfp_t allocation,
+ int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ void *filter_data)
{
struct net *net = sock_net(ssk);
struct netlink_broadcast_data info;
@@ -1059,6 +1066,8 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
info.allocation = allocation;
info.skb = skb;
info.skb2 = NULL;
+ info.tx_filter = filter;
+ info.tx_data = filter_data;
/* While we sleep in clone, do not allow to change socket list */
@@ -1083,6 +1092,14 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
}
return -ESRCH;
}
+EXPORT_SYMBOL(netlink_broadcast_filtered);
+
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
+ u32 group, gfp_t allocation)
+{
+ return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
+ NULL, NULL);
+}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
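netlink_broadcast_filtered() lets the caller veto delivery per receiving socket: a non-zero return from the filter frees that socket's clone and skips it, as the do_one_broadcast() hunk above shows. A hypothetical filter that restricts delivery to one namespace might look like:

	static int example_filter(struct sock *dsk, struct sk_buff *skb, void *data)
	{
		return sock_net(dsk) != data;	/* non-zero: skip this receiver */
	}

	netlink_broadcast_filtered(ssk, skb, 0, group, GFP_KERNEL,
				   example_filter, net);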
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index af4d38bc3b22..b2a3ae6cad78 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -626,6 +626,7 @@ static void pep_sock_close(struct sock *sk, long timeout)
struct pep_sock *pn = pep_sk(sk);
int ifindex = 0;
+ sock_hold(sk); /* keep a reference after sk_common_release() */
sk_common_release(sk);
lock_sock(sk);
@@ -644,6 +645,7 @@ static void pep_sock_close(struct sock *sk, long timeout)
if (ifindex)
gprs_detach(sk);
+ sock_put(sk);
}
static int pep_wait_connreq(struct sock *sk, int noblock)
@@ -696,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
newsk = NULL;
goto out;
}
+ kfree_skb(oskb);
sock_hold(sk);
pep_sk(newsk)->listener = sk;
@@ -1043,12 +1046,12 @@ static void pep_sock_unhash(struct sock *sk)
lock_sock(sk);
if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
skparent = pn->listener;
- sk_del_node_init(sk);
release_sock(sk);
- sk = skparent;
pn = pep_sk(skparent);
- lock_sock(sk);
+ lock_sock(skparent);
+ sk_del_node_init(sk);
+ sk = skparent;
}
/* Unhash a listening sock only when it is closed
* and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
err = rds_ib_setup_qp(conn);
if (err) {
rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+ mutex_unlock(&conn->c_cm_lock);
goto out;
}
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
err = rds_iw_setup_qp(conn);
if (err) {
rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+ mutex_unlock(&conn->c_cm_lock);
goto out;
}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c0b6863e3b87..1980b71c283f 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -33,6 +33,7 @@
static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
static u32 mirred_idx_gen;
static DEFINE_RWLOCK(mirred_lock);
+static LIST_HEAD(mirred_list);
static struct tcf_hashinfo mirred_hash_info = {
.htab = tcf_mirred_ht,
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
m->tcf_bindcnt--;
m->tcf_refcnt--;
if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
- dev_put(m->tcfm_dev);
+ list_del(&m->tcfm_list);
+ if (m->tcfm_dev)
+ dev_put(m->tcfm_dev);
tcf_hash_destroy(&m->common, &mirred_hash_info);
return 1;
}
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
m->tcfm_ok_push = ok_push;
}
spin_unlock_bh(&m->tcf_lock);
- if (ret == ACT_P_CREATED)
+ if (ret == ACT_P_CREATED) {
+ list_add(&m->tcfm_list, &mirred_list);
tcf_hash_insert(pc, &mirred_hash_info);
+ }
return ret;
}
@@ -162,9 +167,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
m->tcf_tm.lastuse = jiffies;
dev = m->tcfm_dev;
+ if (!dev) {
+ printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+ goto out;
+ }
+
if (!(dev->flags & IFF_UP)) {
if (net_ratelimit())
- pr_notice("tc mirred to Houston: device %s is gone!\n",
+ pr_notice("tc mirred to Houston: device %s is down\n",
dev->name);
goto out;
}
@@ -232,6 +242,28 @@ nla_put_failure:
return -1;
}
+static int mirred_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct tcf_mirred *m;
+
+ if (event == NETDEV_UNREGISTER)
+ list_for_each_entry(m, &mirred_list, tcfm_list) {
+ if (m->tcfm_dev == dev) {
+ dev_put(dev);
+ m->tcfm_dev = NULL;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mirred_device_notifier = {
+ .notifier_call = mirred_device_event,
+};
+
+
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.hinfo = &mirred_hash_info,
@@ -252,12 +284,17 @@ MODULE_LICENSE("GPL");
static int __init mirred_init_module(void)
{
+ int err = register_netdevice_notifier(&mirred_device_notifier);
+ if (err)
+ return err;
+
pr_info("Mirror/redirect action on\n");
return tcf_register_action(&act_mirred_ops);
}
static void __exit mirred_cleanup_module(void)
{
+ unregister_netdevice_notifier(&mirred_device_notifier);
tcf_unregister_action(&act_mirred_ops);
}
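The mirred fix tracks live actions on mirred_list and clears their cached device pointer from a NETDEV_UNREGISTER notifier, so a redirect target can disappear without leaving a dangling dev_hold(); note that module init registers the notifier before the action ops and unwinds if that fails. Skeleton of the registration pattern it follows (names illustrative; the raw void *ptr convention matches this kernel generation):

	static int example_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;

		if (event == NETDEV_UNREGISTER) {
			/* drop any cached references to dev here */
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block example_notifier = {
		.notifier_call = example_event,
	};

	/* in module init: */
	err = register_netdevice_notifier(&example_notifier);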
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d885ba311564..724553e8ed7b 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
iph->daddr = new_addr;
csum_replace4(&iph->check, addr, new_addr);
+ } else if ((iph->frag_off & htons(IP_OFFSET)) ||
+ iph->protocol != IPPROTO_ICMP) {
+ goto out;
}
ihl = iph->ihl * 4;
@@ -202,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
{
struct icmphdr *icmph;
- if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+ if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
goto drop;
icmph = (void *)(skb_network_header(skb) + ihl);
@@ -212,6 +215,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
(icmph->type != ICMP_PARAMETERPROB))
break;
+ if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+ goto drop;
+
iph = (void *)(icmph + 1);
if (egress)
addr = iph->daddr;
@@ -247,6 +253,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
break;
}
+out:
return action;
drop:
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fdbd0b7bd840..50e3d945e1f4 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_pedit *p = a->priv;
int i, munged = 0;
- u8 *pptr;
+ unsigned int off;
if (!(skb->tc_verd & TC_OK2MUNGE)) {
/* should we set skb->cloned? */
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
}
}
- pptr = skb_network_header(skb);
+ off = skb_network_offset(skb);
spin_lock(&p->tcf_lock);
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
struct tc_pedit_key *tkey = p->tcfp_keys;
for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
- u32 *ptr;
+ u32 *ptr, _data;
int offset = tkey->off;
if (tkey->offmask) {
- if (skb->len > tkey->at) {
- char *j = pptr + tkey->at;
- offset += ((*j & tkey->offmask) >>
- tkey->shift);
- } else {
+ char *d, _d;
+
+ d = skb_header_pointer(skb, off + tkey->at, 1,
+ &_d);
+ if (!d)
goto bad;
- }
+ offset += (*d & tkey->offmask) >> tkey->shift;
}
if (offset % 4) {
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
goto bad;
}
- ptr = (u32 *)(pptr+offset);
+ ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+ if (!ptr)
+ goto bad;
/* just do it, baby */
*ptr = ((*ptr & tkey->mask) ^ tkey->val);
+ if (ptr == &_data)
+ skb_store_bits(skb, off + offset, ptr, 4);
munged++;
}
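Both this pedit rework and the cls_u32 rework below replace raw skb_network_header() arithmetic with skb_header_pointer(), which bounce-buffers bytes that are not linearly accessible; a modified value must then be written back with skb_store_bits(). The core read-modify-write shape (mask and val stand in for the key being applied):

	u32 *ptr, _data;

	ptr = skb_header_pointer(skb, off + offset, 4, &_data);
	if (!ptr)
		goto bad;	/* offset runs past skb->len */
	*ptr = (*ptr & mask) ^ val;
	if (ptr == &_data)	/* value came from the bounce buffer */
		skb_store_bits(skb, off + offset, ptr, 4);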
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 221180384fd7..78ef2c5e130b 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -16,14 +16,11 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
+#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
-
-struct cgroup_cls_state
-{
- struct cgroup_subsys_state css;
- u32 classid;
-};
+#include <net/sock.h>
+#include <net/cls_cgroup.h>
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
struct cgroup *cgrp);
@@ -112,6 +109,10 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct cls_cgroup_head *head = tp->root;
u32 classid;
+ rcu_read_lock();
+ classid = task_cls_state(current)->classid;
+ rcu_read_unlock();
+
/*
* Due to the nature of the classifier it is required to ignore all
* packets originating from softirq context as accessing `current'
@@ -122,12 +123,12 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
* calls by looking at the number of nested bh disable calls because
* softirqs always disables bh.
*/
- if (softirq_count() != SOFTIRQ_OFFSET)
- return -1;
-
- rcu_read_lock();
- classid = task_cls_state(current)->classid;
- rcu_read_unlock();
+ if (softirq_count() != SOFTIRQ_OFFSET) {
+ /* If there is an sk_classid we'll use that. */
+ if (!skb->sk)
+ return -1;
+ classid = skb->sk->sk_classid;
+ }
if (!classid)
return -1;
@@ -289,18 +290,35 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
static int __init init_cgroup_cls(void)
{
- int ret = register_tcf_proto_ops(&cls_cgroup_ops);
- if (ret)
- return ret;
+ int ret;
+
ret = cgroup_load_subsys(&net_cls_subsys);
if (ret)
- unregister_tcf_proto_ops(&cls_cgroup_ops);
+ goto out;
+
+#ifndef CONFIG_NET_CLS_CGROUP
+ /* We can't use rcu_assign_pointer because this is an int. */
+ smp_wmb();
+ net_cls_subsys_id = net_cls_subsys.subsys_id;
+#endif
+
+ ret = register_tcf_proto_ops(&cls_cgroup_ops);
+ if (ret)
+ cgroup_unload_subsys(&net_cls_subsys);
+
+out:
return ret;
}
static void __exit exit_cgroup_cls(void)
{
unregister_tcf_proto_ops(&cls_cgroup_ops);
+
+#ifndef CONFIG_NET_CLS_CGROUP
+ net_cls_subsys_id = -1;
+ synchronize_rcu();
+#endif
+
cgroup_unload_subsys(&net_cls_subsys);
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 96275422c619..4f522143811e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
{
struct {
struct tc_u_knode *knode;
- u8 *ptr;
+ unsigned int off;
} stack[TC_U32_MAXDEPTH];
struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
- u8 *ptr = skb_network_header(skb);
+ unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
int off2 = 0;
@@ -134,8 +134,14 @@ next_knode:
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
-
- if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+ unsigned int toff;
+ __be32 *data, _data;
+
+ toff = off + key->off + (off2 & key->offmask);
+ data = skb_header_pointer(skb, toff, 4, &_data);
+ if (!data)
+ goto out;
+ if ((*data ^ key->val) & key->mask) {
n = n->next;
goto next_knode;
}
@@ -174,29 +180,45 @@ check_terminal:
if (sdepth >= TC_U32_MAXDEPTH)
goto deadloop;
stack[sdepth].knode = n;
- stack[sdepth].ptr = ptr;
+ stack[sdepth].off = off;
sdepth++;
ht = n->ht_down;
sel = 0;
- if (ht->divisor)
- sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
-
+ if (ht->divisor) {
+ __be32 *data, _data;
+
+ data = skb_header_pointer(skb, off + n->sel.hoff, 4,
+ &_data);
+ if (!data)
+ goto out;
+ sel = ht->divisor & u32_hash_fold(*data, &n->sel,
+ n->fshift);
+ }
if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
goto next_ht;
if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
- if (n->sel.flags&TC_U32_VAROFFSET)
- off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+ if (n->sel.flags & TC_U32_VAROFFSET) {
+ __be16 *data, _data;
+
+ data = skb_header_pointer(skb,
+ off + n->sel.offoff,
+ 2, &_data);
+ if (!data)
+ goto out;
+ off2 += ntohs(n->sel.offmask & *data) >>
+ n->sel.offshift;
+ }
off2 &= ~3;
}
if (n->sel.flags&TC_U32_EAT) {
- ptr += off2;
+ off += off2;
off2 = 0;
}
- if (ptr < skb_tail_pointer(skb))
+ if (off < skb->len)
goto next_ht;
}
@@ -204,9 +226,10 @@ check_terminal:
if (sdepth--) {
n = stack[sdepth].knode;
ht = n->ht_up;
- ptr = stack[sdepth].ptr;
+ off = stack[sdepth].off;
goto check_terminal;
}
+out:
return -1;
deadloop:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index fe35c1f338c2..b9e8c3b7d406 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1195,6 +1195,11 @@ nla_put_failure:
return -1;
}
+static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+{
+ return (q->flags & TCQ_F_BUILTIN) ? true : false;
+}
+
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new)
@@ -1206,11 +1211,11 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
if (!skb)
return -ENOBUFS;
- if (old && old->handle) {
+ if (old && !tc_qdisc_dump_ignore(old)) {
if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
goto err_out;
}
- if (new) {
+ if (new && !tc_qdisc_dump_ignore(new)) {
if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
goto err_out;
}
@@ -1223,11 +1228,6 @@ err_out:
return -EINVAL;
}
-static bool tc_qdisc_dump_ignore(struct Qdisc *q)
-{
- return (q->flags & TCQ_F_BUILTIN) ? true : false;
-}
-
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
struct netlink_callback *cb,
int *q_idx_p, int s_q_idx)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3415b6ce1c0a..807643bdcbac 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -449,6 +449,7 @@ static __init void teql_master_setup(struct net_device *dev)
dev->tx_queue_len = 100;
dev->flags = IFF_NOARP;
dev->hard_header_len = LL_MAX_HEADER;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static LIST_HEAD(master_dev_list);
diff --git a/net/socket.c b/net/socket.c
index f9f7d0872cac..367d5477d00f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -94,6 +94,7 @@
#include <net/compat.h>
#include <net/wext.h>
+#include <net/cls_cgroup.h>
#include <net/sock.h>
#include <linux/netfilter.h>
@@ -558,6 +559,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sock_iocb *si = kiocb_to_siocb(iocb);
int err;
+ sock_update_classid(sock->sk);
+
si->sock = sock;
si->scm = NULL;
si->msg = msg;
@@ -684,6 +687,8 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
{
struct sock_iocb *si = kiocb_to_siocb(iocb);
+ sock_update_classid(sock->sk);
+
si->sock = sock;
si->scm = NULL;
si->msg = msg;
@@ -777,6 +782,8 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
if (unlikely(!sock->ops->splice_read))
return -EINVAL;
+ sock_update_classid(sock->sk);
+
return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}
@@ -3069,6 +3076,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
+ sock_update_classid(sock->sk);
+
if (sock->ops->sendpage)
return sock->ops->sendpage(sock, page, offset, size, flags);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c2173ebdb33c..58de76c8540c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -34,6 +34,7 @@
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/smp_lock.h>
#define RPCDBG_FACILITY RPCDBG_CACHE
@@ -1545,12 +1546,18 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
return cache_poll(filp, wait, cd);
}
-static int cache_ioctl_pipefs(struct inode *inode, struct file *filp,
+static long cache_ioctl_pipefs(struct file *filp,
unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = filp->f_dentry->d_inode;
struct cache_detail *cd = RPC_I(inode)->private;
+ long ret;
- return cache_ioctl(inode, filp, cmd, arg, cd);
+ lock_kernel();
+ ret = cache_ioctl(inode, filp, cmd, arg, cd);
+ unlock_kernel();
+
+ return ret;
}
static int cache_open_pipefs(struct inode *inode, struct file *filp)
@@ -1573,7 +1580,7 @@ const struct file_operations cache_file_operations_pipefs = {
.read = cache_read_pipefs,
.write = cache_write_pipefs,
.poll = cache_poll_pipefs,
- .ioctl = cache_ioctl_pipefs, /* for FIONREAD */
+ .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
.open = cache_open_pipefs,
.release = cache_release_pipefs,
};
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 20e30c6f8355..95ccbcf45d3e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -27,6 +27,7 @@
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/cache.h>
+#include <linux/smp_lock.h>
static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;
@@ -309,8 +310,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
}
static int
-rpc_pipe_ioctl(struct inode *ino, struct file *filp,
- unsigned int cmd, unsigned long arg)
+rpc_pipe_ioctl_unlocked(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
int len;
@@ -331,13 +331,25 @@ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
}
}
+static long
+rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ lock_kernel();
+ ret = rpc_pipe_ioctl_unlocked(filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations rpc_pipe_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = rpc_pipe_read,
.write = rpc_pipe_write,
.poll = rpc_pipe_poll,
- .ioctl = rpc_pipe_ioctl,
+ .unlocked_ioctl = rpc_pipe_ioctl,
.open = rpc_pipe_open,
.release = rpc_pipe_release,
};
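The cache.c and rpc_pipe.c hunks follow the same BKL-pushdown recipe for retiring fops->ioctl: recover the inode from the struct file, wrap the old handler in lock_kernel()/unlock_kernel(), and wire the wrapper into .unlocked_ioctl. Generic sketch (old_ioctl stands in for the existing handler):

	static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
					   unsigned long arg)
	{
		struct inode *inode = filp->f_path.dentry->d_inode;
		long ret;

		lock_kernel();	/* keep the old BKL serialization for now */
		ret = old_ioctl(inode, filp, cmd, arg);
		unlock_kernel();
		return ret;
	}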
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 121105355f60..dac219a56ae1 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -783,7 +783,7 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
port = ntohl(*p);
dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
task->tk_msg.rpc_proc->p_name, port);
- if (unlikely(port > USHORT_MAX))
+ if (unlikely(port > USHRT_MAX))
return -EIO;
rpcb->r_port = port;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3fc325399ee4..dcd0132396ba 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -166,7 +166,6 @@ EXPORT_SYMBOL_GPL(xprt_unregister_transport);
int xprt_load_transport(const char *transport_name)
{
struct xprt_class *t;
- char module_name[sizeof t->name + 5];
int result;
result = 0;
@@ -178,9 +177,7 @@ int xprt_load_transport(const char *transport_name)
}
}
spin_unlock(&xprt_list_lock);
- strcpy(module_name, "xprt");
- strncat(module_name, transport_name, sizeof t->name);
- result = request_module(module_name);
+ result = request_module("xprt%s", transport_name);
out:
return result;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b7cd8cccbe72..2a9675136c68 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2293,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
struct sockaddr *addr = args->dstaddr;
struct rpc_xprt *xprt;
struct sock_xprt *transport;
+ struct rpc_xprt *ret;
xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
if (IS_ERR(xprt))
@@ -2330,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
break;
default:
- kfree(xprt);
- return ERR_PTR(-EAFNOSUPPORT);
+ ret = ERR_PTR(-EAFNOSUPPORT);
+ goto out_err;
}
if (xprt_bound(xprt))
@@ -2346,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
if (try_module_get(THIS_MODULE))
return xprt;
-
+ ret = ERR_PTR(-EINVAL);
+out_err:
kfree(xprt->slot);
kfree(xprt);
- return ERR_PTR(-EINVAL);
+ return ret;
}
static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2368,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
struct sockaddr *addr = args->dstaddr;
struct rpc_xprt *xprt;
struct sock_xprt *transport;
+ struct rpc_xprt *ret;
xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
if (IS_ERR(xprt))
@@ -2403,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
break;
default:
- kfree(xprt);
- return ERR_PTR(-EAFNOSUPPORT);
+ ret = ERR_PTR(-EAFNOSUPPORT);
+ goto out_err;
}
if (xprt_bound(xprt))
@@ -2420,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
if (try_module_get(THIS_MODULE))
return xprt;
-
+ ret = ERR_PTR(-EINVAL);
+out_err:
kfree(xprt->slot);
kfree(xprt);
- return ERR_PTR(-EINVAL);
+ return ret;
}
/**
@@ -2437,6 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
struct rpc_xprt *xprt;
struct sock_xprt *transport;
struct svc_sock *bc_sock;
+ struct rpc_xprt *ret;
xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
if (IS_ERR(xprt))
@@ -2476,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
RPCBIND_NETID_TCP6);
break;
default:
- kfree(xprt);
- return ERR_PTR(-EAFNOSUPPORT);
+ ret = ERR_PTR(-EAFNOSUPPORT);
+ goto out_err;
}
if (xprt_bound(xprt))
@@ -2499,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
if (try_module_get(THIS_MODULE))
return xprt;
+ ret = ERR_PTR(-EINVAL);
+out_err:
kfree(xprt->slot);
kfree(xprt);
- return ERR_PTR(-EINVAL);
+ return ret;
}
static struct xprt_class xs_udp_transport = {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 6a329158bdfa..a3cca0a94346 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -95,13 +95,13 @@ resume:
goto error_nolock;
}
- dst = dst_pop(dst);
+ dst = skb_dst_pop(skb);
if (!dst) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
err = -EHOSTUNREACH;
goto error_nolock;
}
- skb_dst_set(skb, dst);
+ skb_dst_set_noref(skb, dst);
x = dst->xfrm;
} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d965a2bad8d3..a7ec5a8a2380 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
/* Try to instantiate a bundle */
err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
- if (err < 0) {
- if (err != -EAGAIN)
+ if (err <= 0) {
+ if (err != 0 && err != -EAGAIN)
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
return ERR_PTR(err);
}
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
goto make_dummy_bundle;
dst_hold(&xdst->u.dst);
return oldflo;
+ } else if (new_xdst == NULL) {
+ num_xfrms = 0;
+ if (oldflo == NULL)
+ goto make_dummy_bundle;
+ xdst->num_xfrms = 0;
+ dst_hold(&xdst->u.dst);
+ return oldflo;
}
/* Kill the previous bundle */
@@ -1760,6 +1767,10 @@ restart:
xfrm_pols_put(pols, num_pols);
err = PTR_ERR(xdst);
goto dropdst;
+ } else if (xdst == NULL) {
+ num_xfrms = 0;
+ drop_pols = num_pols;
+ goto no_transform;
}
spin_lock_bh(&xfrm_policy_sk_bundle_lock);
@@ -2153,6 +2164,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
return 0;
}
+ skb_dst_force(skb);
dst = skb_dst(skb);
res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
@@ -2299,7 +2311,8 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
return 0;
if (xdst->xfrm_genid != dst->xfrm->genid)
return 0;
- if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
+ if (xdst->num_pols > 0 &&
+ xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
return 0;
if (strict && fl &&