Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--  net/sched/sch_cbq.c  |  76
1 files changed, 29 insertions, 47 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 03e389e8d945..9e43ed949167 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -405,40 +405,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
-static int
-cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl;
- int ret;
-
- if ((cl = q->tx_class) == NULL) {
- kfree_skb(skb);
- sch->qstats.drops++;
- return NET_XMIT_CN;
- }
- q->tx_class = NULL;
-
- cbq_mark_toplevel(q, cl);
-
-#ifdef CONFIG_NET_CLS_ACT
- q->rx_class = cl;
- cl->q->__parent = sch;
-#endif
- if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
- sch->q.qlen++;
- sch->qstats.requeues++;
- if (!cl->next_alive)
- cbq_activate_class(cl);
- return 0;
- }
- if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
- cl->qstats.drops++;
- }
- return ret;
-}
-
/* Overlimit actions */
/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
@@ -1669,7 +1635,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
#endif
}
sch_tree_lock(sch);
- *old = xchg(&cl->q, new);
+ *old = cl->q;
+ cl->q = new;
qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
qdisc_reset(*old);
sch_tree_unlock(sch);
@@ -1798,11 +1765,23 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
}
if (tb[TCA_CBQ_RATE]) {
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
+ rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
+ tb[TCA_CBQ_RTAB]);
if (rtab == NULL)
return -EINVAL;
}
+ if (tca[TCA_RATE]) {
+ err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ qdisc_root_sleeping_lock(sch),
+ tca[TCA_RATE]);
+ if (err) {
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ return err;
+ }
+ }
+
/* Change class parameters */
sch_tree_lock(sch);
@@ -1810,8 +1789,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cbq_deactivate_class(cl);
if (rtab) {
- rtab = xchg(&cl->R_tab, rtab);
- qdisc_put_rtab(rtab);
+ qdisc_put_rtab(cl->R_tab);
+ cl->R_tab = rtab;
}
if (tb[TCA_CBQ_LSSOPT])
@@ -1838,10 +1817,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
sch_tree_unlock(sch);
- if (tca[TCA_RATE])
- gen_replace_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_sleeping_lock(sch),
- tca[TCA_RATE]);
return 0;
}
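
[Note on the estimator hunks above: gen_replace_estimator() (and, further down, gen_new_estimator()) is now called before sch_tree_lock() and its return value is checked, whereas the old code called it after the unlock and ignored failures. The general shape of that change is sketched below as plain userspace C purely for illustration; change_class(), setup_estimator(), tree_lock and live_rate are hypothetical stand-ins, not kernel API. The idea: run the step that can fail first, unwind while nothing has been touched yet, and only perform infallible updates once the lock is held.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static int live_rate;                  /* stands in for the class parameters */

/* Fallible setup step, playing the role of gen_replace_estimator(). */
static int setup_estimator(int rate)
{
        return rate > 0 ? 0 : -1;      /* pretend non-positive rates fail */
}

static int change_class(int new_rate)
{
        int err = setup_estimator(new_rate);

        if (err)
                return err;            /* nothing applied yet, easy unwind */

        pthread_mutex_lock(&tree_lock);
        live_rate = new_rate;          /* only infallible updates under the lock */
        pthread_mutex_unlock(&tree_lock);
        return 0;
}

int main(void)
{
        printf("change to 100: %d\n", change_class(100));
        printf("change to -1:  %d\n", change_class(-1));
        printf("live_rate:     %d\n", live_rate);
        return 0;
}

In the patch itself the early error path additionally releases the rtab obtained just above, which is why the new code calls qdisc_put_rtab() before returning.]
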
@@ -1888,6 +1863,17 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (cl == NULL)
goto failure;
+
+ if (tca[TCA_RATE]) {
+ err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+ qdisc_root_sleeping_lock(sch),
+ tca[TCA_RATE]);
+ if (err) {
+ kfree(cl);
+ goto failure;
+ }
+ }
+
cl->R_tab = rtab;
rtab = NULL;
cl->refcnt = 1;
@@ -1929,10 +1915,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
qdisc_class_hash_grow(sch, &q->clhash);
- if (tca[TCA_RATE])
- gen_new_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
-
*arg = (unsigned long)cl;
return 0;
@@ -2066,7 +2048,7 @@ static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct cbq_sched_data),
.enqueue = cbq_enqueue,
.dequeue = cbq_dequeue,
- .requeue = cbq_requeue,
+ .peek = qdisc_peek_dequeued,
.drop = cbq_drop,
.init = cbq_init,
.reset = cbq_reset,
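
[The last hunk drops the .requeue operation in favour of .peek = qdisc_peek_dequeued. Below is a small userspace sketch of the idea behind that generic helper as I understand it: instead of dequeuing a packet and pushing it back when it cannot be sent, the qdisc dequeues one packet early and caches it, so a peek can show the head of the queue without consuming it. All names here (toy_queue, toy_peek, ...) are made up for illustration, and the real helper also keeps the qdisc's queue-length accounting consistent, which this toy version omits.

#include <stdio.h>

struct toy_pkt {
        int id;
        struct toy_pkt *next;
};

struct toy_queue {
        struct toy_pkt *head;
        struct toy_pkt *peeked;        /* cached result of an early dequeue */
};

static struct toy_pkt *toy_dequeue_raw(struct toy_queue *q)
{
        struct toy_pkt *p = q->head;

        if (p)
                q->head = p->next;
        return p;
}

/* Peek: dequeue once and remember the packet instead of requeueing it. */
static struct toy_pkt *toy_peek(struct toy_queue *q)
{
        if (!q->peeked)
                q->peeked = toy_dequeue_raw(q);
        return q->peeked;
}

/* Dequeue: hand out the cached packet first, if there is one. */
static struct toy_pkt *toy_dequeue(struct toy_queue *q)
{
        struct toy_pkt *p = q->peeked;

        if (p) {
                q->peeked = NULL;
                return p;
        }
        return toy_dequeue_raw(q);
}

int main(void)
{
        struct toy_pkt pkts[3] = { { 1, &pkts[1] }, { 2, &pkts[2] }, { 3, NULL } };
        struct toy_queue q = { pkts, NULL };

        printf("peek    -> %d\n", toy_peek(&q)->id);    /* 1 */
        printf("dequeue -> %d\n", toy_dequeue(&q)->id); /* still 1 */
        printf("dequeue -> %d\n", toy_dequeue(&q)->id); /* 2 */
        return 0;
}

With a peek operation available, the cbq_requeue() path removed at the top of this diff is no longer needed.]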