author     Nick Piggin <nickpiggin@yahoo.com.au>    2005-09-10 00:26:16 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-10 10:06:23 -0700
commit     d6d5cfaf4551aa7713ca6ab73bb77e832602204b (patch)
tree       2741ff061bca597f26df340feec0a6ee58820e64 /kernel/sched.c
parent     67f9a619e7460b7d07284a9d0745727a77d3ade6 (diff)
[PATCH] sched: less newidle locking
Similarly to the earlier change in load_balance(), only lock the runqueue in
load_balance_newidle() if the busiest queue found has nr_running > 1. This
reduces the frequency of expensive remote runqueue lock acquisitions in the
schedule() path on some workloads.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
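The idea generalizes beyond the scheduler: before paying for a contended
remote lock, do a cheap unlocked read of a counter that says whether there is
any work to do at all. Below is a minimal standalone sketch of that pattern,
not kernel code: the struct, field, and function names are hypothetical, and
a pthread mutex stands in for the runqueue spinlock.

/*
 * Sketch of the "peek before locking" pattern this patch applies.
 * All names here are invented for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>

struct run_queue {
	pthread_mutex_t lock;
	atomic_int nr_running;	/* approximate, readable without the lock */
};

/* Try to pull one task from @busiest; returns the number of tasks moved. */
static int pull_if_worthwhile(struct run_queue *busiest)
{
	int nr_moved = 0;

	/*
	 * Unlocked peek: the value may be stale, but a wrong guess only
	 * costs a skipped balance attempt or one wasted lock round-trip.
	 */
	if (atomic_load_explicit(&busiest->nr_running,
				 memory_order_relaxed) > 1) {
		pthread_mutex_lock(&busiest->lock);
		/* Re-check under the lock before actually moving work. */
		if (atomic_load_explicit(&busiest->nr_running,
					 memory_order_relaxed) > 1) {
			atomic_fetch_sub(&busiest->nr_running, 1);
			nr_moved = 1;	/* ...detach one task here... */
		}
		pthread_mutex_unlock(&busiest->lock);
	}
	return nr_moved;
}

A stale read can never cause a correctness problem here, only a missed
opportunity or an unnecessary lock acquisition, because the authoritative
decision is still made under the lock. That is exactly why the patch below
can skip double_lock_balance() entirely when busiest->nr_running <= 1.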
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c61ee3451a04..930189540f3b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2104,8 +2104,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		 */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
-					imbalance, sd, idle,
-					&all_pinned);
+					imbalance, sd, idle, &all_pinned);
 		spin_unlock(&busiest->lock);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
@@ -2200,18 +2199,22 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
 	BUG_ON(busiest == this_rq);
 
-	/* Attempt to move tasks */
-	double_lock_balance(this_rq, busiest);
-
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
 	else
 		sd->nr_balance_failed = 0;
 
-	spin_unlock(&busiest->lock);
 	return nr_moved;
 
 out_balanced: