diff options
author | Gregory Haskins <ghaskins@novell.com> | 2008-01-25 21:08:23 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 21:08:23 +0100 |
commit | c49443c538c1bbf50eda27e4a3711e9fc15176b0 (patch) | |
tree | 8dece13f1f5c48422359c981a4ef8bba92136597 /kernel/sched_rt.c | |
parent | cdc8eb984ce47a7c90a049f45229f7b0d59ba781 (diff) | |
download | blackbird-op-linux-c49443c538c1bbf50eda27e4a3711e9fc15176b0.tar.gz blackbird-op-linux-c49443c538c1bbf50eda27e4a3711e9fc15176b0.zip |
sched: remove some old cpuset logic
We had support for overlapping cpuset based rto logic in early
prototypes that is no longer used, so remove it.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r-- | kernel/sched_rt.c | 33 |
1 files changed, 0 insertions, 33 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a386758ffebb..9affb3c9d3db 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -586,38 +586,6 @@ static int pull_rt_task(struct rq *this_rq)
 			continue;
 
 		src_rq = cpu_rq(cpu);
-		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
-			/*
-			 * It is possible that overlapping cpusets
-			 * will miss clearing a non overloaded runqueue.
-			 * Clear it now.
-			 */
-			if (double_lock_balance(this_rq, src_rq)) {
-				/* unlocked our runqueue lock */
-				struct task_struct *old_next = next;
-
-				next = pick_next_task_rt(this_rq);
-				if (next != old_next)
-					ret = 1;
-			}
-
-			if (likely(src_rq->rt.rt_nr_running <= 1)) {
-				/*
-				 * Small chance that this_rq->curr changed
-				 * but it's really harmless here.
-				 */
-				rt_clear_overload(this_rq);
-			} else {
-				/*
-				 * Heh, the src_rq is now overloaded, since
-				 * we already have the src_rq lock, go straight
-				 * to pulling tasks from it.
-				 */
-				goto try_pulling;
-			}
-
-			spin_unlock(&src_rq->lock);
-			continue;
-		}
-
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
@@ -641,7 +609,6 @@ static int pull_rt_task(struct rq *this_rq)
 			continue;
 		}
 
- try_pulling:
 		p = pick_next_highest_task_rt(src_rq, this_cpu);
 
 		/*