Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c                  2
-rw-r--r--  net/core/netprio_cgroup.c     260
-rw-r--r--  net/core/skbuff.c               6
-rw-r--r--  net/ipv4/inet_diag.c          154
-rw-r--r--  net/ipv4/ip_fragment.c         19
-rw-r--r--  net/ipv4/tcp.c                 15
-rw-r--r--  net/ipv4/tcp_input.c            6
-rw-r--r--  net/ipv4/tcp_output.c          15
-rw-r--r--  net/irda/ircomm/ircomm_tty.c    1
-rw-r--r--  net/sched/cls_cgroup.c         28
10 files changed, 283 insertions, 223 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 47838509f5fd..d0cbc93fcf32 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3549,6 +3549,8 @@ static int napi_gro_complete(struct sk_buff *skb)
struct list_head *head = &offload_base;
int err = -ENOENT;
+ BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
+
if (NAPI_GRO_CB(skb)->count == 1) {
skb_shinfo(skb)->gso_size = 0;
goto out;
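The BUILD_BUG_ON added above guards an invariant the GRO changes further down rely on: the private struct napi_gro_cb must fit inside the skb's generic cb[] scratch area. A minimal userspace sketch of the same compile-time guard, with illustrative struct names (C11 static_assert):

#include <assert.h>

struct cb_space { char cb[48]; };                 /* reserved scratch area */
struct private_cb { void *last; int count; };     /* stand-in for napi_gro_cb */

static_assert(sizeof(struct private_cb) <= sizeof(struct cb_space),
	      "private control block must fit in the reserved cb[]");

If the private struct ever outgrows the reserved space, the build fails instead of silently corrupting adjacent skb fields.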
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 847c02b197b0..5e67defe2cb0 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -27,11 +27,7 @@
#include <linux/fdtable.h>
-#define PRIOIDX_SZ 128
-
-static unsigned long prioidx_map[PRIOIDX_SZ];
-static DEFINE_SPINLOCK(prioidx_map_lock);
-static atomic_t max_prioidx = ATOMIC_INIT(0);
+#define PRIOMAP_MIN_SZ 128
static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
{
@@ -39,136 +35,157 @@ static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgr
struct cgroup_netprio_state, css);
}
-static int get_prioidx(u32 *prio)
-{
- unsigned long flags;
- u32 prioidx;
-
- spin_lock_irqsave(&prioidx_map_lock, flags);
- prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
- if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
- spin_unlock_irqrestore(&prioidx_map_lock, flags);
- return -ENOSPC;
- }
- set_bit(prioidx, prioidx_map);
- if (atomic_read(&max_prioidx) < prioidx)
- atomic_set(&max_prioidx, prioidx);
- spin_unlock_irqrestore(&prioidx_map_lock, flags);
- *prio = prioidx;
- return 0;
-}
-
-static void put_prioidx(u32 idx)
+/*
+ * Extend @dev->priomap so that it's large enough to accommodate
+ * @target_idx. @dev->priomap.priomap_len > @target_idx after successful
+ * return. Must be called under rtnl lock.
+ */
+static int extend_netdev_table(struct net_device *dev, u32 target_idx)
{
- unsigned long flags;
-
- spin_lock_irqsave(&prioidx_map_lock, flags);
- clear_bit(idx, prioidx_map);
- spin_unlock_irqrestore(&prioidx_map_lock, flags);
-}
+ struct netprio_map *old, *new;
+ size_t new_sz, new_len;
-static int extend_netdev_table(struct net_device *dev, u32 new_len)
-{
- size_t new_size = sizeof(struct netprio_map) +
- ((sizeof(u32) * new_len));
- struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
- struct netprio_map *old_priomap;
+ /* is the existing priomap large enough? */
+ old = rtnl_dereference(dev->priomap);
+ if (old && old->priomap_len > target_idx)
+ return 0;
- old_priomap = rtnl_dereference(dev->priomap);
+ /*
+ * Determine the new size. Let's keep it power-of-two. We start
+ * from PRIOMAP_MIN_SZ and double it until it's large enough to
+ * accommodate @target_idx.
+ */
+ new_sz = PRIOMAP_MIN_SZ;
+ while (true) {
+ new_len = (new_sz - offsetof(struct netprio_map, priomap)) /
+ sizeof(new->priomap[0]);
+ if (new_len > target_idx)
+ break;
+ new_sz *= 2;
+ /* overflowed? */
+ if (WARN_ON(new_sz < PRIOMAP_MIN_SZ))
+ return -ENOSPC;
+ }
- if (!new_priomap) {
+ /* allocate & copy */
+ new = kzalloc(new_sz, GFP_KERNEL);
+ if (!new) {
pr_warn("Unable to alloc new priomap!\n");
return -ENOMEM;
}
- if (old_priomap)
- memcpy(new_priomap->priomap, old_priomap->priomap,
- old_priomap->priomap_len *
- sizeof(old_priomap->priomap[0]));
+ if (old)
+ memcpy(new->priomap, old->priomap,
+ old->priomap_len * sizeof(old->priomap[0]));
- new_priomap->priomap_len = new_len;
+ new->priomap_len = new_len;
- rcu_assign_pointer(dev->priomap, new_priomap);
- if (old_priomap)
- kfree_rcu(old_priomap, rcu);
+ /* install the new priomap */
+ rcu_assign_pointer(dev->priomap, new);
+ if (old)
+ kfree_rcu(old, rcu);
return 0;
}
-static int write_update_netdev_table(struct net_device *dev)
+/**
+ * netprio_prio - return the effective netprio of a cgroup-net_device pair
+ * @cgrp: cgroup part of the target pair
+ * @dev: net_device part of the target pair
+ *
+ * Should be called under RCU read or rtnl lock.
+ */
+static u32 netprio_prio(struct cgroup *cgrp, struct net_device *dev)
+{
+ struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
+
+ if (map && cgrp->id < map->priomap_len)
+ return map->priomap[cgrp->id];
+ return 0;
+}
+
+/**
+ * netprio_set_prio - set netprio on a cgroup-net_device pair
+ * @cgrp: cgroup part of the target pair
+ * @dev: net_device part of the target pair
+ * @prio: prio to set
+ *
+ * Set netprio to @prio on @cgrp-@dev pair. Should be called under rtnl
+ * lock and may fail under memory pressure for non-zero @prio.
+ */
+static int netprio_set_prio(struct cgroup *cgrp, struct net_device *dev,
+ u32 prio)
{
- int ret = 0;
- u32 max_len;
struct netprio_map *map;
+ int ret;
- max_len = atomic_read(&max_prioidx) + 1;
+ /* avoid extending priomap for zero writes */
map = rtnl_dereference(dev->priomap);
- if (!map || map->priomap_len < max_len)
- ret = extend_netdev_table(dev, max_len);
+ if (!prio && (!map || map->priomap_len <= cgrp->id))
+ return 0;
- return ret;
+ ret = extend_netdev_table(dev, cgrp->id);
+ if (ret)
+ return ret;
+
+ map = rtnl_dereference(dev->priomap);
+ map->priomap[cgrp->id] = prio;
+ return 0;
}
-static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
{
struct cgroup_netprio_state *cs;
- int ret = -EINVAL;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
- if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
- goto out;
-
- ret = get_prioidx(&cs->prioidx);
- if (ret < 0) {
- pr_warn("No space in priority index array\n");
- goto out;
- }
-
return &cs->css;
-out:
- kfree(cs);
- return ERR_PTR(ret);
}
-static void cgrp_destroy(struct cgroup *cgrp)
+static int cgrp_css_online(struct cgroup *cgrp)
{
- struct cgroup_netprio_state *cs;
+ struct cgroup *parent = cgrp->parent;
struct net_device *dev;
- struct netprio_map *map;
+ int ret = 0;
+
+ if (!parent)
+ return 0;
- cs = cgrp_netprio_state(cgrp);
rtnl_lock();
+ /*
+ * Inherit prios from the parent. As all prios are set during
+ * onlining, there is no need to clear them on offline.
+ */
for_each_netdev(&init_net, dev) {
- map = rtnl_dereference(dev->priomap);
- if (map && cs->prioidx < map->priomap_len)
- map->priomap[cs->prioidx] = 0;
+ u32 prio = netprio_prio(parent, dev);
+
+ ret = netprio_set_prio(cgrp, dev, prio);
+ if (ret)
+ break;
}
rtnl_unlock();
- put_prioidx(cs->prioidx);
- kfree(cs);
+ return ret;
+}
+
+static void cgrp_css_free(struct cgroup *cgrp)
+{
+ kfree(cgrp_netprio_state(cgrp));
}
static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
{
- return (u64)cgrp_netprio_state(cgrp)->prioidx;
+ return cgrp->id;
}
static int read_priomap(struct cgroup *cont, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct net_device *dev;
- u32 prioidx = cgrp_netprio_state(cont)->prioidx;
- u32 priority;
- struct netprio_map *map;
rcu_read_lock();
- for_each_netdev_rcu(&init_net, dev) {
- map = rcu_dereference(dev->priomap);
- priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
- cb->fill(cb, dev->name, priority);
- }
+ for_each_netdev_rcu(&init_net, dev)
+ cb->fill(cb, dev->name, netprio_prio(cont, dev));
rcu_read_unlock();
return 0;
}
@@ -176,66 +193,24 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
const char *buffer)
{
- char *devname = kstrdup(buffer, GFP_KERNEL);
- int ret = -EINVAL;
- u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
- unsigned long priority;
- char *priostr;
+ char devname[IFNAMSIZ + 1];
struct net_device *dev;
- struct netprio_map *map;
-
- if (!devname)
- return -ENOMEM;
-
- /*
- * Minimally sized valid priomap string
- */
- if (strlen(devname) < 3)
- goto out_free_devname;
-
- priostr = strstr(devname, " ");
- if (!priostr)
- goto out_free_devname;
-
- /*
- *Separate the devname from the associated priority
- *and advance the priostr pointer to the priority value
- */
- *priostr = '\0';
- priostr++;
-
- /*
- * If the priostr points to NULL, we're at the end of the passed
- * in string, and its not a valid write
- */
- if (*priostr == '\0')
- goto out_free_devname;
-
- ret = kstrtoul(priostr, 10, &priority);
- if (ret < 0)
- goto out_free_devname;
+ u32 prio;
+ int ret;
- ret = -ENODEV;
+ if (sscanf(buffer, "%"__stringify(IFNAMSIZ)"s %u", devname, &prio) != 2)
+ return -EINVAL;
dev = dev_get_by_name(&init_net, devname);
if (!dev)
- goto out_free_devname;
+ return -ENODEV;
rtnl_lock();
- ret = write_update_netdev_table(dev);
- if (ret < 0)
- goto out_put_dev;
- map = rtnl_dereference(dev->priomap);
- if (map)
- map->priomap[prioidx] = priority;
+ ret = netprio_set_prio(cgrp, dev, prio);
-out_put_dev:
rtnl_unlock();
dev_put(dev);
-
-out_free_devname:
- kfree(devname);
return ret;
}
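The rewritten write_priomap() above replaces the manual kstrdup()/strstr()/kstrtoul() parsing with a single bounded sscanf(). A small userspace sketch of the same parse, with the field width written out (IFNAMSIZ is 16 on Linux, so the buffer holds 16 characters plus the terminating NUL):

#include <stdio.h>

#define IFNAMSIZ 16

static int parse_priomap_write(const char *buffer,
			       char devname[IFNAMSIZ + 1], unsigned int *prio)
{
	/* "%16s" stops at whitespace and cannot overflow devname */
	if (sscanf(buffer, "%16s %u", devname, prio) != 2)
		return -1;
	return 0;
}

The patch spells the width as "%"__stringify(IFNAMSIZ)"s" so it tracks the kernel constant automatically.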
@@ -276,22 +251,13 @@ static struct cftype ss_files[] = {
struct cgroup_subsys net_prio_subsys = {
.name = "net_prio",
- .create = cgrp_create,
- .destroy = cgrp_destroy,
+ .css_alloc = cgrp_css_alloc,
+ .css_online = cgrp_css_online,
+ .css_free = cgrp_css_free,
.attach = net_prio_attach,
.subsys_id = net_prio_subsys_id,
.base_cftypes = ss_files,
.module = THIS_MODULE,
-
- /*
- * net_prio has artificial limit on the number of cgroups and
- * disallows nesting making it impossible to co-mount it with other
- * hierarchical subsystems. Remove the artificially low PRIOIDX_SZ
- * limit and properly nest configuration such that children follow
- * their parents' configurations by default and are allowed to
- * override and remove the following.
- */
- .broken_hierarchy = true,
};
static int netprio_device_event(struct notifier_block *unused,
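extend_netdev_table() above sizes the replacement priomap by doubling from PRIOMAP_MIN_SZ until the flexible array can hold @target_idx. A minimal userspace sketch of that sizing loop, using illustrative struct and macro names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_SZ 128	/* stand-in for PRIOMAP_MIN_SZ */

struct map {
	uint32_t len;		/* number of valid slots */
	uint32_t prio[];	/* flexible array member */
};

static size_t size_for(uint32_t target_idx, size_t *out_len)
{
	size_t sz = MIN_SZ, len;

	for (;;) {
		len = (sz - offsetof(struct map, prio)) / sizeof(uint32_t);
		if (len > target_idx)
			break;
		sz *= 2;
		if (sz < MIN_SZ)	/* wrapped around: give up */
			return 0;
	}
	*out_len = len;
	return sz;
}

int main(void)
{
	size_t len = 0;
	size_t sz = size_for(1000, &len);

	printf("%zu bytes -> %zu slots\n", sz, len);
	return 0;
}

Keeping the allocation power-of-two sized means the slot count is derived from the byte size rather than the other way around, so header growth in struct netprio_map can never silently shrink the usable array.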
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ac9e44a6ab2b..3ab989b0de42 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3032,7 +3032,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_shinfo(nskb)->gso_size = pinfo->gso_size;
pinfo->gso_size = 0;
skb_header_release(p);
- nskb->prev = p;
+ NAPI_GRO_CB(nskb)->last = p;
nskb->data_len += p->len;
nskb->truesize += p->truesize;
@@ -3058,8 +3058,8 @@ merge:
__skb_pull(skb, offset);
- p->prev->next = skb;
- p->prev = skb;
+ NAPI_GRO_CB(p)->last->next = skb;
+ NAPI_GRO_CB(p)->last = skb;
skb_header_release(skb);
done:
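The skbuff.c hunks above stop borrowing skb->prev to remember the tail of a GRO frag chain and keep the cursor in NAPI_GRO_CB(p)->last instead. A hedged sketch of the underlying O(1) tail-append pattern, with illustrative types:

struct node {
	struct node *next;
};

struct chain {
	struct node *head;
	struct node *last;	/* plays the role of NAPI_GRO_CB(p)->last */
};

static void chain_append(struct chain *c, struct node *n)
{
	n->next = NULL;
	if (c->head) {
		c->last->next = n;	/* mirrors NAPI_GRO_CB(p)->last->next = skb */
		c->last = n;		/* mirrors NAPI_GRO_CB(p)->last = skb */
	} else {
		c->head = c->last = n;
	}
}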
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index cb98cbed1973..7afa2c3c788f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -44,6 +44,10 @@ struct inet_diag_entry {
u16 dport;
u16 family;
u16 userlocks;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */
+ struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */
+#endif
};
static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -431,25 +435,31 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
break;
}
- if (cond->prefix_len == 0)
- break;
-
if (op->code == INET_DIAG_BC_S_COND)
addr = entry->saddr;
else
addr = entry->daddr;
+ if (cond->family != AF_UNSPEC &&
+ cond->family != entry->family) {
+ if (entry->family == AF_INET6 &&
+ cond->family == AF_INET) {
+ if (addr[0] == 0 && addr[1] == 0 &&
+ addr[2] == htonl(0xffff) &&
+ bitstring_match(addr + 3,
+ cond->addr,
+ cond->prefix_len))
+ break;
+ }
+ yes = 0;
+ break;
+ }
+
+ if (cond->prefix_len == 0)
+ break;
if (bitstring_match(addr, cond->addr,
cond->prefix_len))
break;
- if (entry->family == AF_INET6 &&
- cond->family == AF_INET) {
- if (addr[0] == 0 && addr[1] == 0 &&
- addr[2] == htonl(0xffff) &&
- bitstring_match(addr + 3, cond->addr,
- cond->prefix_len))
- break;
- }
yes = 0;
break;
}
@@ -512,6 +522,55 @@ static int valid_cc(const void *bc, int len, int cc)
return 0;
}
+/* Validate an inet_diag_hostcond. */
+static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
+ int *min_len)
+{
+ int addr_len;
+ struct inet_diag_hostcond *cond;
+
+ /* Check hostcond space. */
+ *min_len += sizeof(struct inet_diag_hostcond);
+ if (len < *min_len)
+ return false;
+ cond = (struct inet_diag_hostcond *)(op + 1);
+
+ /* Check address family and address length. */
+ switch (cond->family) {
+ case AF_UNSPEC:
+ addr_len = 0;
+ break;
+ case AF_INET:
+ addr_len = sizeof(struct in_addr);
+ break;
+ case AF_INET6:
+ addr_len = sizeof(struct in6_addr);
+ break;
+ default:
+ return false;
+ }
+ *min_len += addr_len;
+ if (len < *min_len)
+ return false;
+
+ /* Check prefix length (in bits) vs address length (in bytes). */
+ if (cond->prefix_len > 8 * addr_len)
+ return false;
+
+ return true;
+}
+
+/* Validate a port comparison operator. */
+static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
+ int len, int *min_len)
+{
+ /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
+ *min_len += sizeof(struct inet_diag_bc_op);
+ if (len < *min_len)
+ return false;
+ return true;
+}
+
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
const void *bc = bytecode;
@@ -519,29 +578,39 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
while (len > 0) {
const struct inet_diag_bc_op *op = bc;
+ int min_len = sizeof(struct inet_diag_bc_op);
//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
switch (op->code) {
- case INET_DIAG_BC_AUTO:
case INET_DIAG_BC_S_COND:
case INET_DIAG_BC_D_COND:
+ if (!valid_hostcond(bc, len, &min_len))
+ return -EINVAL;
+ break;
case INET_DIAG_BC_S_GE:
case INET_DIAG_BC_S_LE:
case INET_DIAG_BC_D_GE:
case INET_DIAG_BC_D_LE:
- case INET_DIAG_BC_JMP:
- if (op->no < 4 || op->no > len + 4 || op->no & 3)
- return -EINVAL;
- if (op->no < len &&
- !valid_cc(bytecode, bytecode_len, len - op->no))
+ if (!valid_port_comparison(bc, len, &min_len))
return -EINVAL;
break;
+ case INET_DIAG_BC_AUTO:
+ case INET_DIAG_BC_JMP:
case INET_DIAG_BC_NOP:
break;
default:
return -EINVAL;
}
- if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+
+ if (op->code != INET_DIAG_BC_NOP) {
+ if (op->no < min_len || op->no > len + 4 || op->no & 3)
+ return -EINVAL;
+ if (op->no < len &&
+ !valid_cc(bytecode, bytecode_len, len - op->no))
+ return -EINVAL;
+ }
+
+ if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
return -EINVAL;
bc += op->yes;
len -= op->yes;
@@ -599,6 +668,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
+/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
+ * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
+ */
+static inline void inet_diag_req_addrs(const struct sock *sk,
+ const struct request_sock *req,
+ struct inet_diag_entry *entry)
+{
+ struct inet_request_sock *ireq = inet_rsk(req);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ if (req->rsk_ops->family == AF_INET6) {
+ entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
+ entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+ } else if (req->rsk_ops->family == AF_INET) {
+ ipv6_addr_set_v4mapped(ireq->loc_addr,
+ &entry->saddr_storage);
+ ipv6_addr_set_v4mapped(ireq->rmt_addr,
+ &entry->daddr_storage);
+ entry->saddr = entry->saddr_storage.s6_addr32;
+ entry->daddr = entry->daddr_storage.s6_addr32;
+ }
+ } else
+#endif
+ {
+ entry->saddr = &ireq->loc_addr;
+ entry->daddr = &ireq->rmt_addr;
+ }
+}
+
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
struct request_sock *req,
struct user_namespace *user_ns,
@@ -640,8 +739,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
- *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
- *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
+ struct inet_diag_entry entry;
+ inet_diag_req_addrs(sk, req, &entry);
+ memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
+ memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
}
#endif
@@ -694,18 +795,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
continue;
if (bc) {
- entry.saddr =
-#if IS_ENABLED(CONFIG_IPV6)
- (entry.family == AF_INET6) ?
- inet6_rsk(req)->loc_addr.s6_addr32 :
-#endif
- &ireq->loc_addr;
- entry.daddr =
-#if IS_ENABLED(CONFIG_IPV6)
- (entry.family == AF_INET6) ?
- inet6_rsk(req)->rmt_addr.s6_addr32 :
-#endif
- &ireq->rmt_addr;
+ inet_diag_req_addrs(sk, req, &entry);
entry.dport = ntohs(ireq->rmt_port);
if (!inet_diag_bc_run(bc, &entry))
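Several of the inet_diag hunks above compare an AF_INET condition against an AF_INET6 socket by first checking for an IPv4-mapped IPv6 address (::ffff:a.b.c.d). A small sketch of that test, assuming addr[] holds the four 32-bit words of the IPv6 address in network byte order, as in the patch:

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

static bool is_v4mapped(const uint32_t addr[4])
{
	return addr[0] == 0 && addr[1] == 0 && addr[2] == htonl(0xffff);
}

When the test passes, the embedded IPv4 address is addr[3], which is why the patch hands bitstring_match() addr + 3.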
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1cf6a768cd53..eb9d63a570cd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -707,28 +707,27 @@ EXPORT_SYMBOL(ip_defrag);
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
- const struct iphdr *iph;
+ struct iphdr iph;
u32 len;
if (skb->protocol != htons(ETH_P_IP))
return skb;
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
return skb;
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
+ if (iph.ihl < 5 || iph.version != 4)
return skb;
- if (!pskb_may_pull(skb, iph->ihl*4))
- return skb;
- iph = ip_hdr(skb);
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < (iph->ihl * 4))
+
+ len = ntohs(iph.tot_len);
+ if (skb->len < len || len < (iph.ihl * 4))
return skb;
- if (ip_is_fragment(ip_hdr(skb))) {
+ if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
+ if (!pskb_may_pull(skb, iph.ihl*4))
+ return skb;
if (pskb_trim_rcsum(skb, len))
return skb;
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
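ip_check_defrag() above switches from pskb_may_pull(), which may reallocate and write to a buffer it might not own, to skb_copy_bits(), which only reads the header into a stack copy. A userspace analog of the safer pattern, parsing from a copy rather than in place (struct iphdr and the flag constants as defined by Linux/glibc):

#include <stdbool.h>
#include <string.h>
#include <netinet/ip.h>
#include <arpa/inet.h>

static bool looks_like_fragment(const void *buf, size_t len)
{
	struct iphdr iph;

	if (len < sizeof(iph))
		return false;
	memcpy(&iph, buf, sizeof(iph));	/* no writes to the shared buffer */
	if (iph.ihl < 5 || iph.version != 4)
		return false;
	/* a fragment has MF set or a nonzero fragment offset */
	return (ntohs(iph.frag_off) & (IP_MF | IP_OFFMASK)) != 0;
}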
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8fc5b3bd6075..1ca253635f7a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -831,8 +831,8 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
return mss_now;
}
-static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
- size_t psize, int flags)
+static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
int mss_now, size_goal;
@@ -859,12 +859,9 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
- while (psize > 0) {
+ while (size > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
- struct page *page = pages[poffset / PAGE_SIZE];
int copy, i;
- int offset = poffset % PAGE_SIZE;
- int size = min_t(size_t, psize, PAGE_SIZE - offset);
bool can_coalesce;
if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
@@ -913,8 +910,8 @@ new_segment:
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
copied += copy;
- poffset += copy;
- if (!(psize -= copy))
+ offset += copy;
+ if (!(size -= copy))
goto out;
if (skb->len < size_goal || (flags & MSG_OOB))
@@ -961,7 +958,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
flags);
lock_sock(sk);
- res = do_tcp_sendpages(sk, &page, offset, size, flags);
+ res = do_tcp_sendpages(sk, page, offset, size, flags);
release_sock(sk);
return res;
}
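The old do_tcp_sendpages() took an array of pages plus a byte offset into the whole array; the removed lines recovered the page and in-page offset on each iteration. Since the caller shown above, tcp_sendpage(), passes a single page, the patch drops the indirection. A sketch of the arithmetic that went away, with PAGE_SIZE assumed to be 4096 here:

#include <stddef.h>

#define PAGE_SIZE 4096UL

static void locate(size_t poffset, size_t *page_idx, size_t *in_page)
{
	*page_idx = poffset / PAGE_SIZE;	/* which page in the array */
	*in_page  = poffset % PAGE_SIZE;	/* offset inside that page */
}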
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fc67831656e5..a13692560e63 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5652,7 +5652,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
if (data) { /* Retransmit unacked data in SYN */
- tcp_retransmit_skb(sk, data);
+ tcp_for_write_queue_from(data, sk) {
+ if (data == tcp_send_head(sk) ||
+ __tcp_retransmit_skb(sk, data))
+ break;
+ }
tcp_rearm_rto(sk);
return true;
}
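The fastopen fix above retransmits every unacked buffer queued at SYN time, not just the first, walking the write queue until it reaches the send head or a retransmit fails. A hedged sketch of that loop shape, with the queue type and retransmit() as illustrative stand-ins:

struct buf {
	struct buf *next;
};

static void retransmit_from(struct buf *data, const struct buf *send_head,
			    int (*retransmit)(struct buf *))
{
	for (; data != NULL; data = data->next) {
		if (data == send_head || retransmit(data) != 0)
			break;
	}
}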
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8ac085573217..5d451593ef16 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2309,12 +2309,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
* state updates are done by the caller. Returns non-zero if an
* error occurred which prevented the send.
*/
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned int cur_mss;
- int err;
/* Inconslusive MTU probe */
if (icsk->icsk_mtup.probe_size) {
@@ -2387,11 +2386,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
GFP_ATOMIC);
- err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
- -ENOBUFS;
+ return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ -ENOBUFS;
} else {
- err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+ return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
+}
+
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int err = __tcp_retransmit_skb(sk, skb);
if (err == 0) {
/* Update global TCP statistics. */
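The tcp_output.c change is a mechanical split: the sending logic moves into __tcp_retransmit_skb(), and tcp_retransmit_skb() becomes a wrapper that layers the statistics bookkeeping on top. A minimal sketch of the pattern, with hypothetical names:

static int __do_work(int arg)
{
	/* core logic; returns 0 or a negative error */
	return 0;
}

static int do_work(int arg)
{
	int err = __do_work(arg);

	if (err == 0) {
		/* bookkeeping only callers of the public entry point want */
	}
	return err;
}

The split is what lets the fastopen change above call __tcp_retransmit_skb() directly for each queued buffer.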
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 496ce2cebcd7..a68c88cdec6e 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -183,6 +183,7 @@ static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self)
ircomm_tty_shutdown(self);
self->magic = 0;
+ tty_port_destroy(&self->port);
kfree(self);
}
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 709b0fb38a18..6db7855b9029 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -35,21 +35,25 @@ static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
struct cgroup_cls_state, css);
}
-static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
{
struct cgroup_cls_state *cs;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
+ return &cs->css;
+}
+static int cgrp_css_online(struct cgroup *cgrp)
+{
if (cgrp->parent)
- cs->classid = cgrp_cls_state(cgrp->parent)->classid;
-
- return &cs->css;
+ cgrp_cls_state(cgrp)->classid =
+ cgrp_cls_state(cgrp->parent)->classid;
+ return 0;
}
-static void cgrp_destroy(struct cgroup *cgrp)
+static void cgrp_css_free(struct cgroup *cgrp)
{
kfree(cgrp_cls_state(cgrp));
}
@@ -98,21 +102,13 @@ static struct cftype ss_files[] = {
struct cgroup_subsys net_cls_subsys = {
.name = "net_cls",
- .create = cgrp_create,
- .destroy = cgrp_destroy,
+ .css_alloc = cgrp_css_alloc,
+ .css_online = cgrp_css_online,
+ .css_free = cgrp_css_free,
.attach = cgrp_attach,
.subsys_id = net_cls_subsys_id,
.base_cftypes = ss_files,
.module = THIS_MODULE,
-
- /*
- * While net_cls cgroup has the rudimentary hierarchy support of
- * inheriting the parent's classid on cgroup creation, it doesn't
- * properly propagates config changes in ancestors to their
- * descendents. A child should follow the parent's configuration
- * but be allowed to override it. Fix it and remove the following.
- */
- .broken_hierarchy = true,
};
struct cls_cgroup_head {
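Both netprio_cgroup and cls_cgroup migrate from the old create/destroy pair to the three-stage css lifecycle: css_alloc() only allocates, css_online() runs once the parent is visible (so inheritance can happen there and may fail cleanly), and css_free() releases memory. A hedged sketch of that division of labor, with hypothetical names:

#include <stdlib.h>

struct state {
	unsigned int classid;
};

/* alloc: memory only; no parent-dependent work yet */
static struct state *state_alloc(void)
{
	return calloc(1, sizeof(struct state));
}

/* online: parent is now visible; inherit its configuration */
static int state_online(struct state *s, const struct state *parent)
{
	if (parent)
		s->classid = parent->classid;
	return 0;
}

/* free: nothing to unwind, just release the memory */
static void state_free(struct state *s)
{
	free(s);
}

This is also why both files drop .broken_hierarchy: children now inherit their parents' configuration at online time instead of being artificially restricted.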