author     Peter Zijlstra <peterz@infradead.org>  2016-06-22 15:14:26 +0200
committer  Ingo Molnar <mingo@kernel.org>         2016-06-27 12:53:19 +0200
commit     55e16d30bd99510900caec913c90f53bc2b35cba (patch)
tree       42984b0b0daaae83ea3c7b9e35d65ce9d7bf1c39 /kernel/sched
parent     599b4840b0ea453c7d11e1798dcc8f494dcfd58a (diff)
sched/fair: Rework throttle_count sync
Since we already take rq->lock when creating a cgroup, use it to also
sync the throttle_count and avoid the extra state and enqueue path
branch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: linux-kernel@vger.kernel.org
[ Fixed build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
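In short: instead of lazily syncing the hierarchical throttle_count on the
first enqueue (the throttle_uptodate branch removed below), the new
sync_throttle() helper copies the parent's state once per CPU while the
group is brought online, under the rq->lock that online_fair_sched_group()
already holds. A condensed sketch of the resulting flow follows; the
per-CPU loop and variable setup in online_fair_sched_group() are
reconstructed around the locked region shown in the last fair.c hunk, not
quoted verbatim from it.

/* Sketch of the new flow introduced by this patch (condensed). */
static void sync_throttle(struct task_group *tg, int cpu)
{
	struct cfs_rq *pcfs_rq, *cfs_rq;

	if (!cfs_bandwidth_used())
		return;
	if (!tg->parent)	/* the root group has nothing to inherit */
		return;

	cfs_rq  = tg->cfs_rq[cpu];
	pcfs_rq = tg->parent->cfs_rq[cpu];

	/* Inherit the parent's throttle depth and start the task clock now. */
	cfs_rq->throttle_count = pcfs_rq->throttle_count;
	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
}

void online_fair_sched_group(struct task_group *tg)
{
	struct sched_entity *se;
	struct rq *rq;
	int i;

	for_each_possible_cpu(i) {
		rq = cpu_rq(i);
		se = tg->se[i];

		raw_spin_lock_irq(&rq->lock);	/* already taken for PELT init */
		post_init_entity_util_avg(se);
		sync_throttle(tg, i);		/* piggyback the throttle sync */
		raw_spin_unlock_irq(&rq->lock);
	}
}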
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c  | 39
-rw-r--r--  kernel/sched/sched.h |  2
2 files changed, 20 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 62d5e7dcc7f8..4088eedea763 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4241,26 +4241,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
-	/* Synchronize hierarchical throttle counter: */
-	if (unlikely(!cfs_rq->throttle_uptodate)) {
-		struct rq *rq = rq_of(cfs_rq);
-		struct cfs_rq *pcfs_rq;
-		struct task_group *tg;
-
-		cfs_rq->throttle_uptodate = 1;
-
-		/* Get closest up-to-date node, because leaves go first: */
-		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
-			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
-			if (pcfs_rq->throttle_uptodate)
-				break;
-		}
-		if (tg) {
-			cfs_rq->throttle_count = pcfs_rq->throttle_count;
-			cfs_rq->throttled_clock_task = rq_clock_task(rq);
-		}
-	}
-
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
@@ -4275,6 +4255,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	throttle_cfs_rq(cfs_rq);
 }
 
+static void sync_throttle(struct task_group *tg, int cpu)
+{
+	struct cfs_rq *pcfs_rq, *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return;
+
+	if (!tg->parent)
+		return;
+
+	cfs_rq = tg->cfs_rq[cpu];
+	pcfs_rq = tg->parent->cfs_rq[cpu];
+
+	cfs_rq->throttle_count = pcfs_rq->throttle_count;
+	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+}
+
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4414,6 +4411,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static inline void sync_throttle(struct task_group *tg, int cpu) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -8646,6 +8644,7 @@ void online_fair_sched_group(struct task_group *tg)
 
 		raw_spin_lock_irq(&rq->lock);
 		post_init_entity_util_avg(se);
+		sync_throttle(tg, i);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 28c42b789f70..f44da95c70cd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -438,7 +438,7 @@ struct cfs_rq {
 
 	u64 throttled_clock, throttled_clock_task;
 	u64 throttled_clock_task_time;
-	int throttled, throttle_count, throttle_uptodate;
+	int throttled, throttle_count;
 	struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */