author		Vincent Guittot <vincent.guittot@linaro.org>	2015-02-27 16:54:09 +0100
committer	Ingo Molnar <mingo@kernel.org>			2015-03-27 09:36:02 +0100
commit		ca6d75e6908efbc350d536e0b496ebdac36b20d2 (patch)
tree		e2e5c20604a6348678176a8f35755489d390a56a /kernel/sched
parent		b5b4860d1d61ddc5308c7d492cbeaa3a6e508d7f (diff)
sched: Add struct rq::cpu_capacity_orig
This new field 'cpu_capacity_orig' reflects the original capacity of a CPU
before being altered by rt tasks and/or IRQ.

The cpu_capacity_orig will be used:

 - to detect when the capacity of a CPU has been noticeably reduced so we
   can trigger load balance to look for a CPU with better capacity. As an
   example, we can detect when a CPU handles a significant amount of irq
   (with CONFIG_IRQ_TIME_ACCOUNTING) but this CPU is seen as an idle CPU
   by the scheduler, whereas CPUs which are really idle are available.

 - to evaluate the available capacity for CFS tasks.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Acked-by: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Morten.Rasmussen@arm.com
Cc: dietmar.eggemann@arm.com
Cc: efault@gmx.de
Cc: linaro-kernel@lists.linaro.org
Cc: nicolas.pitre@linaro.org
Cc: preeti@linux.vnet.ibm.com
Cc: riel@redhat.com
Link: http://lkml.kernel.org/r/1425052454-25797-7-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
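The first use case can be pictured with a minimal sketch. This is illustrative only and not part of the patch: it assumes the capacity_of()/capacity_orig_of() accessors touched below, and the helper name and the ~80% threshold are assumptions made for the example.

/*
 * Illustrative sketch, not part of this patch: flag a CPU whose
 * capacity has been noticeably reduced by rt tasks and/or IRQ.
 * The helper name and the ~80% threshold are assumptions.
 */
static inline bool cpu_capacity_noticeably_reduced(int cpu)
{
	/* true when rt/IRQ pressure left less than ~80% of the CPU to CFS */
	return capacity_of(cpu) * 5 < capacity_orig_of(cpu) * 4;
}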
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	2
-rw-r--r--	kernel/sched/fair.c	8
-rw-r--r--	kernel/sched/sched.h	1
3 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index feda520bd034..7022e9084624 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7216,7 +7216,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
-		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
+		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
 		rq->post_schedule = 0;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dc7c693f044a..10f84c3c6769 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4363,6 +4363,11 @@ static unsigned long capacity_of(int cpu)
 	return cpu_rq(cpu)->cpu_capacity;
 }
 
+static unsigned long capacity_orig_of(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity_orig;
+}
+
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -6040,6 +6045,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 
 	capacity >>= SCHED_CAPACITY_SHIFT;
 
+	cpu_rq(cpu)->cpu_capacity_orig = capacity;
 	sdg->sgc->capacity_orig = capacity;
 
 	capacity *= scale_rt_capacity(cpu);
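As a worked example of the scaling in this hunk (illustrative only, not part of the patch): with SCHED_CAPACITY_SHIFT == 10, a CPU whose rt/IRQ scaling factor comes out at 820 keeps cpu_capacity_orig at 1024 while cpu_capacity drops to 820; the value 820 is an assumption for the example.

/* Illustrative worked example, not part of this patch. */
static unsigned long example_scaled_capacity(void)
{
	unsigned long capacity = 1024;	/* SCHED_CAPACITY_SCALE */
	unsigned long rt_scale = 820;	/* assumed rt/IRQ scaling factor */

	/* cpu_capacity_orig would keep 1024; cpu_capacity gets scaled down: */
	capacity *= rt_scale;
	capacity >>= 10;		/* SCHED_CAPACITY_SHIFT: 1024 * 820 >> 10 == 820 */

	return capacity;
}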
@@ -6094,7 +6100,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 			 * Runtime updates will correct capacity_orig.
 			 */
 			if (unlikely(!rq->sd)) {
-				capacity_orig += capacity_of(cpu);
+				capacity_orig += capacity_orig_of(cpu);
 				capacity += capacity_of(cpu);
 				continue;
 			}
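For the second use case from the changelog, a minimal sketch of how the new accessor could be used to evaluate how much of a CPU is left for CFS tasks. The helper name is hypothetical; the division is safe because cpu_capacity_orig is initialised to SCHED_CAPACITY_SCALE and so is never zero.

/*
 * Illustrative sketch, not part of this patch: fraction of the original
 * capacity still usable by CFS, in SCHED_CAPACITY_SCALE units
 * (1024 == nothing stolen by rt/IRQ).  Helper name is hypothetical.
 */
static inline unsigned long cfs_capacity_fraction(int cpu)
{
	return (capacity_of(cpu) << SCHED_CAPACITY_SHIFT) /
	       capacity_orig_of(cpu);
}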
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 36000029f33b..be56dfd645b2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -615,6 +615,7 @@ struct rq {
 	struct sched_domain *sd;
 
 	unsigned long cpu_capacity;
+	unsigned long cpu_capacity_orig;
 
 	unsigned char idle_balance;
 	/* For active balancing */