author | Christoph Lameter <clameter@sgi.com> | 2006-12-10 02:20:22 -0800
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-10 09:55:42 -0800
commit | 7835b98bc6de2ca10afa45572d272304b000b048 (patch)
tree | 6c029f271821b00d18f5f318526b80511b409838 /kernel
parent | fe2eea3fafb3df2f5b8a55a48bcbb0d23b3b5618 (diff)
download | blackbird-op-linux-7835b98bc6de2ca10afa45572d272304b000b048.tar.gz, blackbird-op-linux-7835b98bc6de2ca10afa45572d272304b000b048.zip
[PATCH] sched: extract load calculation from rebalance_tick
A load calculation is always done in rebalance_tick() in addition to the real
load balancing activities that only take place when certain jiffie counts have
been reached. Move that processing into a separate function and call it
directly from scheduler_tick().
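
For illustration, here is a stand-alone sketch of the decaying per-CPU load average that this moves into update_load(). Only the averaging step (cpu_load[i] = (old_load*(scale-1) + new_load) / scale, with the scale doubling per slot) is taken from the hunks below; the mock_rq type, the three-slot array size and the driver in main() are assumptions made for the example, not kernel code.

```c
#include <stdio.h>

/* Mock of the few rq fields the calculation touches (illustrative only). */
struct mock_rq {
	unsigned long raw_weighted_load;	/* instantaneous load of this CPU */
	unsigned long cpu_load[3];		/* averages with scale 1, 2, 4 */
};

static void mock_update_load(struct mock_rq *rq)
{
	unsigned long this_load = rq->raw_weighted_load;
	int i, scale;

	for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
		unsigned long old_load = rq->cpu_load[i];
		unsigned long new_load = this_load;

		/* Round up when the load is rising so the average can reach it. */
		if (new_load > old_load)
			new_load += scale - 1;
		rq->cpu_load[i] = (old_load * (scale - 1) + new_load) / scale;
	}
}

int main(void)
{
	struct mock_rq rq = { 0, { 0, 0, 0 } };
	unsigned long samples[] = { 10, 10, 10, 0, 0 };

	for (unsigned int t = 0; t < sizeof(samples) / sizeof(samples[0]); t++) {
		rq.raw_weighted_load = samples[t];
		mock_update_load(&rq);
		printf("tick %u: cpu_load = { %lu, %lu, %lu }\n",
		       t, rq.cpu_load[0], rq.cpu_load[1], rq.cpu_load[2]);
	}
	return 0;
}
```

With scale values 1, 2 and 4, the first slot follows the raw load immediately while the later slots decay toward it more slowly; the rounding keeps a rising load from stalling just below its target.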
Also extract the time slice handling from scheduler_tick and put it into a
separate function. Then we can clean up scheduler_tick significantly. It
will no longer have any gotos.
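
The shape of the resulting tick path can be shown with a small self-contained mock. Everything here is stubbed; only the call structure mirrors the new scheduler_tick() in the hunks below, and the mock_* names are invented for the example.

```c
#include <stdbool.h>
#include <stdio.h>

enum idle_type { NOT_IDLE, SCHED_IDLE };

struct mock_rq { bool current_is_idle; };

/* Stubs standing in for the real helpers; they only trace what would run. */
static bool wake_priority_sleeper(struct mock_rq *rq) { (void)rq; return false; }
static void task_running_tick(struct mock_rq *rq)     { (void)rq; puts("timeslice bookkeeping"); }
static void update_load(struct mock_rq *rq)           { (void)rq; puts("load average update"); }
static void rebalance_tick(struct mock_rq *rq, enum idle_type idle)
{
	(void)rq;
	printf("rebalance check (%s)\n", idle == SCHED_IDLE ? "idle" : "busy");
}

/* Mirrors the refactored scheduler_tick(): no gotos, every step a plain call. */
static void mock_scheduler_tick(struct mock_rq *rq)
{
	enum idle_type idle = NOT_IDLE;

	if (rq->current_is_idle) {
		if (!wake_priority_sleeper(rq))
			idle = SCHED_IDLE;
	} else
		task_running_tick(rq);

	update_load(rq);
	rebalance_tick(rq, idle);
}

int main(void)
{
	struct mock_rq busy = { .current_is_idle = false };
	struct mock_rq idle = { .current_is_idle = true };

	mock_scheduler_tick(&busy);
	mock_scheduler_tick(&idle);
	return 0;
}
```

The `out` label disappears because the only code that followed it, the rebalance call, now runs unconditionally at the end of scheduler_tick().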
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 96
1 file changed, 54 insertions(+), 42 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d327511d268e..3c1b74aa1b07 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2833,20 +2833,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	spin_unlock(&target_rq->lock);
 }

-/*
- * rebalance_tick will get called every timer tick, on every CPU.
- *
- * It checks each scheduling domain to see if it is due to be balanced,
- * and initiates a balancing operation if so.
- *
- * Balancing parameters are set up in arch_init_sched_domains.
- */
-
-static void
-rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
+static void update_load(struct rq *this_rq)
 {
-	unsigned long this_load, interval;
-	struct sched_domain *sd;
+	unsigned long this_load;
 	int i, scale;

 	this_load = this_rq->raw_weighted_load;
@@ -2866,6 +2855,22 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 			new_load += scale-1;
 		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
 	}
+}
+
+/*
+ * rebalance_tick will get called every timer tick, on every CPU.
+ *
+ * It checks each scheduling domain to see if it is due to be balanced,
+ * and initiates a balancing operation if so.
+ *
+ * Balancing parameters are set up in arch_init_sched_domains.
+ */
+
+static void
+rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
+{
+	unsigned long interval;
+	struct sched_domain *sd;

 	for_each_domain(this_cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -2897,12 +2902,15 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 /*
  * on UP we do not need to balance between CPUs:
  */
-static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle)
+static inline void rebalance_tick(int cpu, struct rq *rq)
 {
 }
 static inline void idle_balance(int cpu, struct rq *rq)
 {
 }
+static inline void update_load(struct rq *this_rq)
+{
+}
 #endif

 static inline int wake_priority_sleeper(struct rq *rq)
@@ -3052,35 +3060,12 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 	cpustat->steal = cputime64_add(cpustat->steal, tmp);
 }

-/*
- * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled.
- *
- * It also gets called by the fork code, when changing the parent's
- * timeslices.
- */
-void scheduler_tick(void)
+static void task_running_tick(struct rq *rq, struct task_struct *p)
 {
-	unsigned long long now = sched_clock();
-	struct task_struct *p = current;
-	int cpu = smp_processor_id();
-	struct rq *rq = cpu_rq(cpu);
-
-	update_cpu_clock(p, rq, now);
-
-	rq->timestamp_last_tick = now;
-
-	if (p == rq->idle) {
-		if (wake_priority_sleeper(rq))
-			goto out;
-		rebalance_tick(cpu, rq, SCHED_IDLE);
-		return;
-	}
-
-	/* Task might have expired already, but not scheduled off yet */
 	if (p->array != rq->active) {
+		/* Task has expired but was not scheduled yet */
 		set_tsk_need_resched(p);
-		goto out;
+		return;
 	}
 	spin_lock(&rq->lock);
 	/*
@@ -3148,8 +3133,35 @@ void scheduler_tick(void)
 	}
 out_unlock:
 	spin_unlock(&rq->lock);
-out:
-	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+void scheduler_tick(void)
+{
+	unsigned long long now = sched_clock();
+	struct task_struct *p = current;
+	int cpu = smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+	enum idle_type idle = NOT_IDLE;
+
+	update_cpu_clock(p, rq, now);
+
+	rq->timestamp_last_tick = now;
+
+	if (p == rq->idle) {
+		/* Task on the idle queue */
+		if (!wake_priority_sleeper(rq))
+			idle = SCHED_IDLE;
+	} else
+		task_running_tick(rq, p);
+	update_load(rq);
+	rebalance_tick(cpu, rq, idle);
 }

 #ifdef CONFIG_SCHED_SMT
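
For context, the balancing work that stays in rebalance_tick() remains gated per scheduling domain on elapsed jiffies (the "certain jiffie counts" mentioned in the changelog); that check sits in unchanged lines between the hunks above. Below is a minimal sketch of this kind of interval gating, with invented structure and field names rather than the real struct sched_domain members.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a scheduling domain's balance bookkeeping. */
struct mock_domain {
	unsigned long balance_interval;	/* ticks between balance attempts */
	unsigned long last_balance;	/* tick count at the last attempt */
};

static bool balance_due(struct mock_domain *sd, unsigned long now_ticks)
{
	if (now_ticks - sd->last_balance >= sd->balance_interval) {
		sd->last_balance += sd->balance_interval;
		return true;	/* time to run the expensive balancing pass */
	}
	return false;		/* too soon: only the load average was refreshed */
}

int main(void)
{
	struct mock_domain sd = { .balance_interval = 4, .last_balance = 0 };

	for (unsigned long tick = 1; tick <= 10; tick++)
		printf("tick %2lu: %s\n", tick,
		       balance_due(&sd, tick) ? "balance" : "skip");
	return 0;
}
```

In the patch itself only the load averaging moves out of that gate; the load_balance() calls still honour each domain's interval.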