author | David S. Miller <davem@davemloft.net> | 2008-07-21 09:56:13 -0700 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-07-21 10:10:50 -0700 |
commit | d3678b463df73f5060d7420915080e19baeb379b (patch) | |
tree | 4f4fc3e49bb0a9d21b9280f1c1a990eb03651564 /net/sched | |
parent | 867d79fb9a4d5929ad8335c896fcfe11c3b2ef14 (diff) | |
download | blackbird-op-linux-d3678b463df73f5060d7420915080e19baeb379b.tar.gz blackbird-op-linux-d3678b463df73f5060d7420915080e19baeb379b.zip |
Revert "pkt_sched: Make default qdisc nonshared-multiqueue safe."
This reverts commit a0c80b80e0fb48129e4e9d6a9ede914f9ff1850d.
After discussions with Jamal and Herbert on netdev, we should
provide at least minimal prioritization at the qdisc level
even in multiqueue situations.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_generic.c | 99 |
1 files changed, 77 insertions, 22 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 09dead335805..cb625b4d6da5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -356,44 +356,99 @@ static struct Qdisc noqueue_qdisc = {
 };
 
-static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static const u8 prio2band[TC_PRIO_MAX+1] =
+	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* 3-band FIFO queue: old style, but should be a bit faster than
+   generic prio+fifo combination.
+ */
+
+#define PFIFO_FAST_BANDS 3
+
+static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
+					     struct Qdisc *qdisc)
+{
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+	return list + prio2band[skb->priority & TC_PRIO_MAX];
+}
+
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = &qdisc->q;
+	struct sk_buff_head *list = prio2list(skb, qdisc);
 
-	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
+	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
+	}
 
 	return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = &qdisc->q;
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
 
-	if (!skb_queue_empty(list))
-		return __qdisc_dequeue_head(qdisc, list);
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		if (!skb_queue_empty(list + prio)) {
+			qdisc->q.qlen--;
+			return __qdisc_dequeue_head(qdisc, list + prio);
+		}
+	}
 
 	return NULL;
 }
 
-static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	return __qdisc_requeue(skb, qdisc, &qdisc->q);
+	qdisc->q.qlen++;
+	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
 }
 
-static void fifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
-	__qdisc_reset_queue(qdisc, &qdisc->q);
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+		__qdisc_reset_queue(qdisc, list + prio);
+
+	qdisc->qstats.backlog = 0;
+	qdisc->q.qlen = 0;
 }
 
-static struct Qdisc_ops fifo_fast_ops __read_mostly = {
-	.id		=	"fifo_fast",
-	.priv_size	=	0,
-	.enqueue	=	fifo_fast_enqueue,
-	.dequeue	=	fifo_fast_dequeue,
-	.requeue	=	fifo_fast_requeue,
-	.reset		=	fifo_fast_reset,
+static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
+{
+	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
+
+	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	return skb->len;
+
+nla_put_failure:
+	return -1;
+}
+
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+{
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+		skb_queue_head_init(list + prio);
+
+	return 0;
+}
+
+static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+	.id		=	"pfifo_fast",
+	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+	.enqueue	=	pfifo_fast_enqueue,
+	.dequeue	=	pfifo_fast_dequeue,
+	.requeue	=	pfifo_fast_requeue,
+	.init		=	pfifo_fast_init,
+	.reset		=	pfifo_fast_reset,
+	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
 };
 
@@ -522,7 +577,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
 	if (dev->tx_queue_len) {
 		qdisc = qdisc_create_dflt(dev, dev_queue,
-					  &fifo_fast_ops, TC_H_ROOT);
+					  &pfifo_fast_ops, TC_H_ROOT);
 		if (!qdisc) {
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
 			return;
@@ -550,9 +605,9 @@ void dev_activate(struct net_device *dev)
 	int need_watchdog;
 
 	/* No queueing discipline is attached to device;
-	 * create default one i.e. fifo_fast for devices,
-	 * which need queueing and noqueue_qdisc for
-	 * virtual interfaces.
+	   create default one i.e. pfifo_fast for devices,
+	   which need queueing and noqueue_qdisc for
+	   virtual interfaces
 	 */
 
 	if (dev_all_qdisc_sleeping_noop(dev))
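
For readers unfamiliar with the restored pfifo_fast behaviour, here is a stand-alone user-space C sketch (an illustration, not kernel code; the fake_skb and band structures and the BAND_DEPTH limit are invented for this example). It mirrors the two ideas in the diff above: skb->priority is mapped to one of three FIFO bands via prio2band, and dequeue always drains the lowest-numbered non-empty band first, which is the "minimal prioritization" the commit message refers to.

/* Toy model of the 3-band scheme the restored pfifo_fast qdisc uses.
 * Not kernel code: fake_skb/band stand in for sk_buff/sk_buff_head.
 */
#include <stdio.h>

#define TC_PRIO_MAX		15
#define PFIFO_FAST_BANDS	3
#define BAND_DEPTH		16	/* assumed per-band capacity for this sketch */

/* Same band map as in the patch: priorities 6 and 7 land in band 0
 * (served first), "best effort" priorities in band 1, bulk in band 2.
 */
static const unsigned char prio2band[TC_PRIO_MAX + 1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

struct fake_skb {
	unsigned int priority;		/* stand-in for skb->priority */
	int payload;
};

struct band {
	struct fake_skb q[BAND_DEPTH];
	int head, tail, len;
};

static struct band bands[PFIFO_FAST_BANDS];

static int enqueue(struct fake_skb skb)
{
	struct band *b = &bands[prio2band[skb.priority & TC_PRIO_MAX]];

	if (b->len >= BAND_DEPTH)
		return -1;		/* would be qdisc_drop() in the kernel */
	b->q[b->tail] = skb;
	b->tail = (b->tail + 1) % BAND_DEPTH;
	b->len++;
	return 0;
}

static int dequeue(struct fake_skb *out)
{
	int prio;

	/* Strict priority: band 0 is always served before bands 1 and 2. */
	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct band *b = &bands[prio];

		if (b->len) {
			*out = b->q[b->head];
			b->head = (b->head + 1) % BAND_DEPTH;
			b->len--;
			return 0;
		}
	}
	return -1;			/* all bands empty */
}

int main(void)
{
	struct fake_skb skb;
	unsigned int prios[] = { 2, 0, 6 };	/* bulk, best effort, interactive */
	size_t i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		skb.priority = prios[i];
		skb.payload = (int)i;
		enqueue(skb);
	}

	/* Dequeues in band order: priority 6 first (band 0), then 0, then 2. */
	while (dequeue(&skb) == 0)
		printf("dequeued payload %d (priority %u)\n",
		       skb.payload, skb.priority);
	return 0;
}

In the kernel version above, the per-band queues live in the qdisc's private area (hence .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head)), and the drop decision compares the band's length against dev->tx_queue_len via qdisc_drop() rather than a fixed BAND_DEPTH.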