author    | Peter Zijlstra <peterz@infradead.org> | 2015-06-11 14:46:37 +0200
committer | Thomas Gleixner <tglx@linutronix.de>  | 2015-06-19 00:25:26 +0200
commit    | e3fca9e7cbfb72694a21c886fcdf9f059cfded9c
tree      | 59d47a653d9c86b7537810344c6d5d7702785fe1 /kernel/sched/core.c
parent    | 624bbdfac99c50bf03dff9a0023f666b8e965627
sched: Replace post_schedule with a balance callback list
Generalize the post_schedule() stuff into a balance callback list.
This allows us to more easily use it outside of schedule() and across
sched_class boundaries (see the enqueue-side sketch below).
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.424032725@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
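The enqueue side of the new list lives in kernel/sched/sched.h, so it does not appear in the core.c diffstat below. A minimal sketch of that helper's shape, assuming only the list protocol that __balance_callback() drains in the diff (callbacks are pushed LIFO onto rq->balance_callback, head->next doubles as the on-list marker, and rq->lock must be held):

```c
/*
 * Sketch of the enqueue helper this patch adds to kernel/sched/sched.h
 * (not part of the core.c diffstat shown on this page).
 */
static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	/* Bail if this callback is already queued. */
	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
```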
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- | kernel/sched/core.c | 36
1 file changed, 24 insertions, 12 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 41942a5f3315..fa32bc09dadf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2277,23 +2277,35 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 
 #ifdef CONFIG_SMP
 
 /* rq->lock is NOT held, but preemption is disabled */
-static inline void post_schedule(struct rq *rq)
+static void __balance_callback(struct rq *rq)
 {
-	if (rq->post_schedule) {
-		unsigned long flags;
+	struct callback_head *head, *next;
+	void (*func)(struct rq *rq);
+	unsigned long flags;
 
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->curr->sched_class->post_schedule)
-			rq->curr->sched_class->post_schedule(rq);
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	head = rq->balance_callback;
+	rq->balance_callback = NULL;
+	while (head) {
+		func = (void (*)(struct rq *))head->func;
+		next = head->next;
+		head->next = NULL;
+		head = next;
 
-		rq->post_schedule = 0;
+		func(rq);
 	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static inline void balance_callback(struct rq *rq)
+{
+	if (unlikely(rq->balance_callback))
+		__balance_callback(rq);
 }
 
 #else
 
-static inline void post_schedule(struct rq *rq)
+static inline void balance_callback(struct rq *rq)
 {
 }
 
@@ -2311,7 +2323,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	/* finish_task_switch() drops rq->lock and enables preemtion */
 	preempt_disable();
 	rq = finish_task_switch(prev);
-	post_schedule(rq);
+	balance_callback(rq);
 	preempt_enable();
 
 	if (current->set_child_tid)
@@ -2823,7 +2835,7 @@ static void __sched __schedule(void)
 	} else
 		raw_spin_unlock_irq(&rq->lock);
 
-	post_schedule(rq);
+	balance_callback(rq);
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -7219,7 +7231,7 @@ void __init sched_init(void)
 		rq->sd = NULL;
 		rq->rd = NULL;
 		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
-		rq->post_schedule = 0;
+		rq->balance_callback = NULL;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
```
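By way of illustration, a sched_class converts its old ->post_schedule() hook by queueing a callback whenever post-schedule balancing is needed. The names below (rt_push_head, queue_push_tasks()) are placeholders in the spirit of the rt/deadline conversion patches in this series, not code from this commit:

```c
/*
 * Illustrative sketch, not part of this commit: how an rt-style
 * sched_class would queue its push logic on the new callback list.
 * rt_push_head and queue_push_tasks() are placeholder names.
 */
static DEFINE_PER_CPU(struct callback_head, rt_push_head);

static void push_rt_tasks(struct rq *rq);	/* existing push-balancing logic */

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	/* rq->lock is held; push_rt_tasks() runs when balance_callback() drains the list. */
	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
			       push_rt_tasks);
}
```

Because balance_callback() drains the list at the end of __schedule() and schedule_tail(), a class can queue work from any path that holds rq->lock, not only from a ->post_schedule() hook, which is the extra generality the changelog refers to.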