author		Vincent Guittot <vincent.guittot@linaro.org>	2015-08-03 11:55:50 +0200
committer	Ingo Molnar <mingo@kernel.org>	2015-09-13 09:52:51 +0200
commit		c5afb6a87f2386bcf09fa051e6ca390d43e2222e (patch)
tree		3ba5357eeeb0d056db25c8e7869c4cbd854c40cc /kernel/sched
parent		446685e9bfa11174332fbb0b3218b37015fbf4ff (diff)
sched/fair: Fix nohz.next_balance update
Since commit d4573c3e1c99 ("sched: Improve load balancing in the presence of idle CPUs"), the ILB CPU starts with the idle load balancing of the other idle CPUs and finishes with itself, in order to speed up the spread of tasks over all idle CPUs.

this_rq->next_balance is still used in nohz_idle_balance() as an intermediate step to gather the shortest next balance before updating nohz.next_balance. But this_rq->next_balance has not been updated yet at that point and is likely still set to the current jiffies. As a result, nohz.next_balance ends up set to the current jiffies instead of the real next balance date, which generates spurious kicks of the nohz idle balance.

nohz_idle_balance() must therefore set nohz.next_balance without taking this_rq->next_balance into account, since the latter is not updated yet. this_rq then updates nohz.next_balance with its own next_balance once that has been computed, and only if necessary.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: preeti@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438595750-20455-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
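To make the ordering problem concrete, below is a minimal standalone C sketch of the scheme the patch switches to. It is an illustration only, not kernel code: struct rq, jiffies, nohz_next_balance and nohz_idle_balance_fixed() are simplified stand-ins, and a plain < comparison replaces time_after(). The point is to gather the earliest next_balance of the other idle CPUs in a local variable and publish it to nohz.next_balance only when something was actually gathered, rather than routing it through this_rq->next_balance, which at that point still holds a stale value close to the current jiffies.

/*
 * Standalone illustration only -- simplified stand-ins for rq,
 * nohz.next_balance and jiffies; not the kernel implementation.
 */
#include <stdio.h>

struct rq { unsigned long next_balance; };

static unsigned long jiffies = 1000;		/* pretend "now" */
static unsigned long nohz_next_balance;		/* stand-in for nohz.next_balance */

/* Fixed scheme: accumulate the earliest balance time in a local variable. */
static void nohz_idle_balance_fixed(struct rq *this_rq,
				    struct rq **idle_rqs, int nr)
{
	unsigned long next_balance = jiffies + 60 * 100;	/* far in the future */
	int update_next_balance = 0;
	int i;

	for (i = 0; i < nr; i++) {
		/* rebalance_domains(idle_rqs[i], CPU_IDLE) would run here */
		if (idle_rqs[i]->next_balance < next_balance) {	/* plain <, not time_after() */
			next_balance = idle_rqs[i]->next_balance;
			update_next_balance = 1;
		}
	}

	/*
	 * Publish only what was gathered from the other idle CPUs;
	 * this_rq->next_balance is still stale here and is folded in
	 * later, when rebalance_domains() runs on this_rq itself.
	 */
	if (update_next_balance)
		nohz_next_balance = next_balance;
	(void)this_rq;
}

int main(void)
{
	struct rq a = { .next_balance = 1100 }, b = { .next_balance = 1250 };
	struct rq self = { .next_balance = 1000 };	/* not updated yet */
	struct rq *idle[] = { &a, &b };

	nohz_idle_balance_fixed(&self, idle, 2);
	printf("nohz.next_balance = %lu (not %lu)\n", nohz_next_balance,
	       self.next_balance);
	return 0;
}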
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4e305d174b55..36774e5dab01 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7647,8 +7647,22 @@ out:
 	 * When the cpu is attached to null domain for ex, it will not be
 	 * updated.
 	 */
-	if (likely(update_next_balance))
+	if (likely(update_next_balance)) {
 		rq->next_balance = next_balance;
+
+#ifdef CONFIG_NO_HZ_COMMON
+		/*
+		 * If this CPU has been elected to perform the nohz idle
+		 * balance. Other idle CPUs have already rebalanced with
+		 * nohz_idle_balance() and nohz.next_balance has been
+		 * updated accordingly. This CPU is now running the idle load
+		 * balance for itself and we need to update the
+		 * nohz.next_balance accordingly.
+		 */
+		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
+			nohz.next_balance = rq->next_balance;
+#endif
+	}
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
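A side note on the comparison used in the hunk above: time_after(nohz.next_balance, rq->next_balance) only ever pulls nohz.next_balance earlier, and it compares jiffies values in a wraparound-safe way. The following userspace stand-in (my_time_after() is a hypothetical name; the real macro lives in include/linux/jiffies.h and additionally type-checks its arguments) shows the underlying signed-difference trick.

/* Userspace illustration of the wrap-safe jiffies comparison; not the kernel macro. */
#include <assert.h>

#define my_time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long now = (unsigned long)-10;	/* counter close to wraparound */

	assert(my_time_after(now + 20, now));	/* still "later", even across the wrap */
	assert(!my_time_after(now, now + 20));
	return 0;
}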
@@ -7661,6 +7675,9 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	int this_cpu = this_rq->cpu;
 	struct rq *rq;
 	int balance_cpu;
+	/* Earliest time when we have to do rebalance again */
+	unsigned long next_balance = jiffies + 60*HZ;
+	int update_next_balance = 0;
 
 	if (idle != CPU_IDLE ||
 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
@@ -7692,10 +7709,19 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 			rebalance_domains(rq, CPU_IDLE);
 		}
 
-		if (time_after(this_rq->next_balance, rq->next_balance))
-			this_rq->next_balance = rq->next_balance;
+		if (time_after(next_balance, rq->next_balance)) {
+			next_balance = rq->next_balance;
+			update_next_balance = 1;
+		}
 	}
-	nohz.next_balance = this_rq->next_balance;
+
+	/*
+	 * next_balance will be updated only when there is a need.
+	 * When the CPU is attached to null domain for ex, it will not be
+	 * updated.
+	 */
+	if (likely(update_next_balance))
+		nohz.next_balance = next_balance;
 end:
 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
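Why a stale nohz.next_balance hurts: the nohz kick path bails out while jiffies is still before nohz.next_balance, so a value equal to the current jiffies defeats that early-out and an idle CPU gets woken for nothing. The sketch below (kick_wanted() and my_time_before() are illustrative stand-ins; the other conditions checked by the real nohz_kick_needed() are elided) contrasts the stale value with a real balance deadline.

/*
 * Sketch of the early-out performed by the nohz kick path; userspace
 * stand-in, other conditions elided.
 */
#include <stdio.h>

#define my_time_before(a, b)	((long)((a) - (b)) < 0)

static int kick_wanted(unsigned long now, unsigned long nohz_next_balance)
{
	/* Nothing is due yet: no reason to wake an idle CPU. */
	if (my_time_before(now, nohz_next_balance))
		return 0;
	return 1;	/* further checks follow in the real code */
}

int main(void)
{
	unsigned long now = 1000;

	/* Before the fix: nohz.next_balance ~ jiffies -> immediate kick. */
	printf("stale value  : kick=%d\n", kick_wanted(now, now));
	/* After the fix: the real earliest balance date is honoured. */
	printf("real deadline: kick=%d\n", kick_wanted(now, now + 37));
	return 0;
}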