author | Eric Dumazet <edumazet@google.com> | 2013-12-15 13:15:25 -0800 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-12-17 15:25:20 -0500 |
commit | c3bd85495aef69e9df2e03a450a3a1f195c3867c (patch) | |
tree | fe8543f71cf8483c830a78d8dab7a3202e8b1f02 /net/sched | |
parent | e7ef941d3e2ea92d564a0bb9b35b1f392b3a171e (diff) | |
pkt_sched: fq: more robust memory allocation
This patch brings NUMA support and an automatic fallback to vmalloc()
in case kmalloc() fails to allocate the FQ hash table.

NUMA support depends on XPS being set up for the device before qdisc
allocation. After an XPS change, it might be worth re-creating the
qdisc hierarchy.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
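For reference, the core of the change is the pair of allocation helpers added in the diff below. A minimal, kernel-style sketch of the same pattern, mirroring the fq_alloc_node()/fq_free() helpers from the patch (the #includes are only what a standalone sketch would need):

```c
#include <linux/slab.h>     /* kmalloc_node(), kfree() */
#include <linux/vmalloc.h>  /* vmalloc_node(), vfree() */
#include <linux/mm.h>       /* is_vmalloc_addr() */

/* Try a node-local kmalloc first, without warning on failure, then fall
 * back to vmalloc on the same NUMA node when physically contiguous
 * memory of that size is not available.
 */
static void *fq_alloc_node(size_t sz, int node)
{
        void *ptr;

        ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
        if (!ptr)
                ptr = vmalloc_node(sz, node);
        return ptr;
}

/* Freeing must check which allocator actually provided the memory. */
static void fq_free(void *addr)
{
        if (addr && is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
```

Later kernels expose this exact pattern as kvmalloc_node()/kvfree(); at the time of this patch each user open-coded the fallback.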
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_fq.c | 34 |
1 file changed, 28 insertions, 6 deletions
```diff
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 95d843961907..f2fb92dd970d 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -47,6 +47,7 @@
 #include <linux/rbtree.h>
 #include <linux/hash.h>
 #include <linux/prefetch.h>
+#include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
@@ -578,15 +579,36 @@ static void fq_rehash(struct fq_sched_data *q,
         q->stat_gc_flows += fcnt;
 }
 
-static int fq_resize(struct fq_sched_data *q, u32 log)
+static void *fq_alloc_node(size_t sz, int node)
 {
+        void *ptr;
+
+        ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
+        if (!ptr)
+                ptr = vmalloc_node(sz, node);
+        return ptr;
+}
+
+static void fq_free(void *addr)
+{
+        if (addr && is_vmalloc_addr(addr))
+                vfree(addr);
+        else
+                kfree(addr);
+}
+
+static int fq_resize(struct Qdisc *sch, u32 log)
+{
+        struct fq_sched_data *q = qdisc_priv(sch);
         struct rb_root *array;
         u32 idx;
 
         if (q->fq_root && log == q->fq_trees_log)
                 return 0;
 
-        array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL);
+        /* If XPS was setup, we can allocate memory on right NUMA node */
+        array = fq_alloc_node(sizeof(struct rb_root) << log,
+                              netdev_queue_numa_node_read(sch->dev_queue));
         if (!array)
                 return -ENOMEM;
 
@@ -595,7 +617,7 @@ static int fq_resize(struct fq_sched_data *q, u32 log)
 
         if (q->fq_root) {
                 fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
-                kfree(q->fq_root);
+                fq_free(q->fq_root);
         }
         q->fq_root = array;
         q->fq_trees_log = log;
@@ -676,7 +698,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
         }
 
         if (!err)
-                err = fq_resize(q, fq_log);
+                err = fq_resize(sch, fq_log);
 
         while (sch->q.qlen > sch->limit) {
                 struct sk_buff *skb = fq_dequeue(sch);
@@ -697,7 +719,7 @@ static void fq_destroy(struct Qdisc *sch)
         struct fq_sched_data *q = qdisc_priv(sch);
 
         fq_reset(sch);
-        kfree(q->fq_root);
+        fq_free(q->fq_root);
         qdisc_watchdog_cancel(&q->watchdog);
 }
 
@@ -723,7 +745,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
         if (opt)
                 err = fq_change(sch, opt);
         else
-                err = fq_resize(q, q->fq_trees_log);
+                err = fq_resize(sch, q->fq_trees_log);
 
         return err;
 }
```
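The NUMA placement itself is driven by the tx queue the qdisc is attached to. A hedged sketch of just that part (the helper name below is hypothetical; netdev_queue_numa_node_read() is the real accessor used by the patch, where its result feeds fq_alloc_node() rather than a bare kmalloc_node()):

```c
#include <linux/netdevice.h>    /* netdev_queue_numa_node_read() */
#include <linux/rbtree.h>       /* struct rb_root */
#include <linux/slab.h>
#include <net/sch_generic.h>    /* struct Qdisc */

/* Hypothetical helper: allocate the 1 << log hash buckets on the NUMA
 * node recorded for the qdisc's tx queue.  That node is typically only
 * filled in once XPS has been configured for the device; before that
 * the read returns NUMA_NO_NODE and the allocation has no placement
 * preference, hence the changelog's suggestion to re-create the qdisc
 * hierarchy after an XPS change.
 */
static struct rb_root *fq_hash_table_on_queue_node(struct Qdisc *sch, u32 log)
{
        int node = netdev_queue_numa_node_read(sch->dev_queue);

        return kmalloc_node(sizeof(struct rb_root) << log,
                            GFP_KERNEL | __GFP_NOWARN, node);
}
```

In the patch this node value is passed to fq_alloc_node(), so the vmalloc() fallback honours the same node hint.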