author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-11-15 15:47:02 -0800
committer | Ingo Molnar <mingo@elte.hu> | 2010-11-18 13:27:47 +0100
commit | 9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a (patch)
tree | 74ec7a4f3e544ed64afb6cb7abe6322ca700d19a /kernel/sched.c
parent | 3d4b47b4b040c9d77dd68104cfc1055d89a55afd (diff)
download | blackbird-op-linux-9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a.tar.gz, blackbird-op-linux-9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a.zip
sched: Make tg_shares_up() walk on-demand
Make tg_shares_up() use the active cgroup list; this means we cannot
do a strict bottom-up walk of the hierarchy, but assuming it's a very
wide tree with a small number of active groups it should be a win.
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.754159484@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
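For illustration only, and not part of the patch: a minimal userspace model of the trade-off the changelog describes. When the group hierarchy is wide but only a handful of groups carry runnable load, walking a list of active groups touches far fewer nodes than a strict walk over every group. All names below (struct group, next_active, walk_all(), walk_active()) are hypothetical.

```c
#include <stdio.h>

/* Hypothetical model: a wide, mostly idle group hierarchy, flattened
 * into a sibling chain, plus an on-demand list of the active groups. */
struct group {
	int id;
	int active;			/* has runnable load on this cpu */
	struct group *next_sibling;	/* flat stand-in for the full tree */
	struct group *next_active;	/* on-demand "active" list */
};

/* Strict walk: visit every group, active or not. */
static int walk_all(struct group *root)
{
	int visited = 0;
	for (struct group *g = root; g; g = g->next_sibling)
		visited++;
	return visited;
}

/* On-demand walk: visit only groups that contribute load. */
static int walk_active(struct group *active_head)
{
	int visited = 0;
	for (struct group *g = active_head; g; g = g->next_active)
		visited++;
	return visited;
}

int main(void)
{
	enum { NGROUPS = 1000 };
	static struct group g[NGROUPS];
	struct group *active_head = NULL;

	for (int i = 0; i < NGROUPS; i++) {
		g[i].id = i;
		g[i].next_sibling = (i + 1 < NGROUPS) ? &g[i + 1] : NULL;
		if (i % 100 == 0) {	/* pretend 1% of groups are active */
			g[i].active = 1;
			g[i].next_active = active_head;
			active_head = &g[i];
		}
	}

	printf("full walk visits %d groups, active walk visits %d\n",
	       walk_all(&g[0]), walk_active(active_head));
	return 0;
}
```

With 1000 groups of which 10 are active, the full walk visits 1000 nodes and the active walk 10; the update cost tracks the number of active groups rather than the size of the hierarchy.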
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 67
1 file changed, 0 insertions, 67 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 22436dd2e19f..6268d2dc9a91 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -279,13 +279,6 @@ static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-	return list_empty(&root_task_group.children);
-}
-#endif
-
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
@@ -1546,48 +1539,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static void update_cfs_load(struct cfs_rq *cfs_rq, int lb);
-static void update_cfs_shares(struct cfs_rq *cfs_rq);
-
-/*
- * update tg->load_weight by folding this cpu's load_avg
- */
-static int tg_shares_up(struct task_group *tg, void *data)
-{
-	long load_avg;
-	struct cfs_rq *cfs_rq;
-	unsigned long flags;
-	int cpu = (long)data;
-	struct rq *rq;
-
-	if (!tg->se[cpu])
-		return 0;
-
-	rq = cpu_rq(cpu);
-	cfs_rq = tg->cfs_rq[cpu];
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-
-	update_rq_clock(rq);
-	update_cfs_load(cfs_rq, 1);
-
-	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-	load_avg -= cfs_rq->load_contribution;
-
-	atomic_add(load_avg, &tg->load_weight);
-	cfs_rq->load_contribution += load_avg;
-
-	/*
-	 * We need to update shares after updating tg->load_weight in
-	 * order to adjust the weight of groups with long running tasks.
-	 */
-	update_cfs_shares(cfs_rq);
-
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return 0;
-}
-
 /*
  * Compute the cpu's hierarchical load factor for each task group.
  * This needs to be done in a top-down fashion because the load of a child
@@ -1611,29 +1562,11 @@ static int tg_load_down(struct task_group *tg, void *data)
 	return 0;
 }
 
-static void update_shares(long cpu)
-{
-	if (root_task_group_empty())
-		return;
-
-	/*
-	 * XXX: replace with an on-demand list
-	 */
-
-	walk_tg_tree(tg_nop, tg_shares_up, (void *)cpu);
-}
-
 static void update_h_load(long cpu)
 {
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
-#else
-
-static inline void update_shares(int cpu)
-{
-}
-
 #endif
 
 #ifdef CONFIG_PREEMPT
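The code removed above is the per-cpu fold that tg_shares_up() performed: turn the cpu's accumulated load_avg/load_period into an average, then add only the delta against what that cpu already contributed to tg->load_weight. A standalone sketch of just that arithmetic, with made-up types and numbers (the kernel does this with div64_u64() and atomic_add() under rq->lock):

```c
#include <stdio.h>

/* Hypothetical stand-in for the per-cpu cfs_rq fields used by the fold. */
struct cfs_rq_model {
	unsigned long long load_avg;	/* accumulated load * time */
	unsigned long long load_period;	/* accumulated time */
	long load_contribution;		/* already folded into the group */
};

/* Fold this cpu's average load into the group-wide weight, adding only
 * the change since the last fold so nothing is counted twice. */
static void fold_cpu_load(struct cfs_rq_model *cfs_rq, long *tg_load_weight)
{
	long load_avg = (long)(cfs_rq->load_avg / (cfs_rq->load_period + 1));
	long delta = load_avg - cfs_rq->load_contribution;

	*tg_load_weight += delta;
	cfs_rq->load_contribution += delta;
}

int main(void)
{
	long tg_load_weight = 0;
	struct cfs_rq_model cpu0 = { .load_avg = 2048 * 10, .load_period = 9 };
	struct cfs_rq_model cpu1 = { .load_avg = 1024 * 10, .load_period = 9 };

	fold_cpu_load(&cpu0, &tg_load_weight);
	fold_cpu_load(&cpu1, &tg_load_weight);
	printf("after first folds:  tg load_weight = %ld\n", tg_load_weight);

	/* cpu0's load grows; a later fold adds only the difference. */
	cpu0.load_avg += 4096 * 10;
	cpu0.load_period += 10;
	fold_cpu_load(&cpu0, &tg_load_weight);
	printf("after re-fold cpu0: tg load_weight = %ld\n", tg_load_weight);

	return 0;
}
```

Because only the difference against the previous contribution reaches the group weight, repeated folds of the same cpu never double-count, which is what lets an on-demand walk revisit an active cfs_rq at arbitrary times without inflating tg->load_weight.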