author     Peter Zijlstra <peterz@infradead.org>      2011-11-10 13:01:10 +0100
committer  Ingo Molnar <mingo@elte.hu>                2011-11-16 08:43:43 +0100
commit     4dcfe1025b513c2c1da5bf5586adb0e80148f612
tree       1c0bf8ad4c09a3a618c471b6468873d8d8ed2117 /kernel
parent     f1c6f1a7eed963ed233ba4c8b6fa8addb86c6ddc
sched: Avoid SMT siblings in select_idle_sibling() if possible
Prevent select_idle_sibling() from picking a sibling thread if there's
an idle core that shares cache.
This fixes SMT balancing in the increasingly common case where there's
a shared cache core available to balance to.
Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1321350377.1421.55.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched_fair.c | 42 ++++++++++++++++++++++++++++--------------
1 file changed, 28 insertions(+), 14 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7e51b5bb27cc..ba0e1f49a22f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2326,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
-	int i;
+	struct sched_group *sg;
+	int i, smt = 0;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2346,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	rcu_read_lock();
+again:
 	for_each_domain(target, sd) {
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-			break;
+		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+			continue;
 
-		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-			if (idle_cpu(i)) {
-				target = i;
-				break;
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+			if (!smt) {
+				smt = 1;
+				goto again;
 			}
+			break;
 		}
 
-		/*
-		 * Lets stop looking for an idle sibling when we reached
-		 * the domain that spans the current cpu and prev_cpu.
-		 */
-		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-			break;
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+					tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
 	}
+done:
 	rcu_read_unlock();
 
 	return target;
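
To make the intent of the new control flow concrete: the patch turns select_idle_sibling() into a two-pass search over the cache-sharing domains. The first pass skips SD_SHARE_CPUPOWER (SMT) domains and only accepts a sched_group, i.e. a core, whose CPUs are all idle; only if no fully idle core is found does it retry with smt = 1 and settle for a lone idle sibling thread. The stand-alone program below is a minimal user-space sketch of that preference order, not the kernel code; the topology model (NR_CORES, THREADS_PER_CORE, the idle[] array) and the name select_idle_sibling_sketch() are hypothetical and exist only to illustrate the policy.

/*
 * Sketch of the selection policy introduced by the patch: prefer a core
 * whose SMT siblings are all idle over a lone idle sibling thread on a
 * busy core.  The fixed topology below is purely illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CORES         4
#define THREADS_PER_CORE 2
#define NR_CPUS          (NR_CORES * THREADS_PER_CORE)

static bool idle[NR_CPUS];		/* per-logical-CPU idle state */

/* A core counts as idle only if every one of its threads is idle. */
static bool core_fully_idle(int core)
{
	for (int t = 0; t < THREADS_PER_CORE; t++)
		if (!idle[core * THREADS_PER_CORE + t])
			return false;
	return true;
}

/*
 * Pick a CPU for a waking task, mirroring the patch's intent:
 *  1) first pass: look for a core whose threads are all idle and
 *     return its first CPU;
 *  2) second pass: fall back to any idle thread, else keep @target.
 */
static int select_idle_sibling_sketch(int target)
{
	for (int core = 0; core < NR_CORES; core++)
		if (core_fully_idle(core))
			return core * THREADS_PER_CORE;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (idle[cpu])
			return cpu;

	return target;
}

int main(void)
{
	/* CPUs 0/1 share core 0, CPUs 2/3 core 1, CPUs 4/5 core 2, ... */
	idle[1] = true;			/* idle sibling of a busy thread */
	idle[4] = idle[5] = true;	/* core 2 is completely idle */

	printf("selected CPU %d\n", select_idle_sibling_sketch(0));
	return 0;
}

With the state set up in main(), the sketch returns CPU 4 (the first thread of the fully idle core) rather than CPU 1 (the idle SMT sibling of a busy thread), which is the behaviour the commit message describes as fixing SMT balancing when a shared-cache core is available.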