author    | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-12-03 08:21:06 -0800
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-12-03 08:21:06 -0800
commit    | ca6435f188a0ce40eae716c158053e49130886a3 (patch)
tree      | 99d58ba96adc568bd227c48669940ab726ca18bf /kernel
parent    | 26145f7e7ebc8a5f1b29425d6da62e872031be4a (diff)
parent    | d842de871c8c5e2110c7e4f3f29bbe7b1a519ab8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: cpu accounting controller (V2)
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c      | 155
-rw-r--r-- | kernel/sched_fair.c |   6
-rw-r--r-- | kernel/sched_rt.c   |   1
3 files changed, 136 insertions(+), 26 deletions(-)
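
The net effect of the merge is to move per-group CPU usage reporting out of the "cpu" controller, where the removed cpu.usage file reported milliseconds, into a dedicated "cpuacct" controller whose cpuacct.usage file reports cumulative nanoseconds. As orientation before the diff, here is a minimal userspace sketch that reads the new file; the mount point /cgroups and the group name grp0 are invented for illustration, and the controller must first be mounted (e.g. mount -t cgroup -o cpuacct none /cgroups):

/*
 * Hedged sketch: read a group's cumulative CPU time from the new
 * cpuacct controller. The path below assumes cgroupfs is mounted at
 * /cgroups with the cpuacct subsystem and that a group "grp0" exists;
 * both names are assumptions for this example.
 */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	FILE *f = fopen("/cgroups/grp0/cpuacct.usage", "r");
	uint64_t usage_ns;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%" SCNu64, &usage_ns) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* cpuusage_read() sums the per-cpu counters and reports ns */
	printf("%" PRIu64 " ns (%.3f s)\n", usage_ns, usage_ns / 1e9);
	return 0;
}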
diff --git a/kernel/sched.c b/kernel/sched.c
index 98dcdf272db3..59ff6b140edb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -854,6 +854,12 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		   struct rq_iterator *iterator);
 #endif
 
+#ifdef CONFIG_CGROUP_CPUACCT
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+#endif
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -7221,38 +7227,12 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
 	return (u64) tg->shares;
 }
 
-static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	struct task_group *tg = cgroup_tg(cgrp);
-	unsigned long flags;
-	u64 res = 0;
-	int i;
-
-	for_each_possible_cpu(i) {
-		/*
-		 * Lock to prevent races with updating 64-bit counters
-		 * on 32-bit arches.
-		 */
-		spin_lock_irqsave(&cpu_rq(i)->lock, flags);
-		res += tg->se[i]->sum_exec_runtime;
-		spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
-	}
-	/* Convert from ns to ms */
-	do_div(res, NSEC_PER_MSEC);
-
-	return res;
-}
-
 static struct cftype cpu_files[] = {
 	{
 		.name = "shares",
 		.read_uint = cpu_shares_read_uint,
 		.write_uint = cpu_shares_write_uint,
 	},
-	{
-		.name = "usage",
-		.read_uint = cpu_usage_read,
-	},
 };
 
 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
@@ -7272,3 +7252,126 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 };
 
 #endif	/* CONFIG_FAIR_CGROUP_SCHED */
+
+#ifdef CONFIG_CGROUP_CPUACCT
+
+/*
+ * CPU accounting code for task groups.
+ *
+ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
+ * (balbir@in.ibm.com).
+ */
+
+/* track cpu usage of a group of tasks */
+struct cpuacct {
+	struct cgroup_subsys_state css;
+	/* cpuusage holds pointer to a u64-type object on every cpu */
+	u64 *cpuusage;
+};
+
+struct cgroup_subsys cpuacct_subsys;
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+{
+	return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+			    struct cpuacct, css);
+}
+
+/* create a new cpu accounting group */
+static struct cgroup_subsys_state *cpuacct_create(
+	struct cgroup_subsys *ss, struct cgroup *cont)
+{
+	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+	if (!ca)
+		return ERR_PTR(-ENOMEM);
+
+	ca->cpuusage = alloc_percpu(u64);
+	if (!ca->cpuusage) {
+		kfree(ca);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return &ca->css;
+}
+
+/* destroy an existing cpu accounting group */
+static void cpuacct_destroy(struct cgroup_subsys *ss,
+			    struct cgroup *cont)
+{
+	struct cpuacct *ca = cgroup_ca(cont);
+
+	free_percpu(ca->cpuusage);
+	kfree(ca);
+}
+
+/* return total cpu usage (in nanoseconds) of a group */
+static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+{
+	struct cpuacct *ca = cgroup_ca(cont);
+	u64 totalcpuusage = 0;
+	int i;
+
+	for_each_possible_cpu(i) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+
+		/*
+		 * Take rq->lock to make 64-bit addition safe on 32-bit
+		 * platforms.
+		 */
+		spin_lock_irq(&cpu_rq(i)->lock);
+		totalcpuusage += *cpuusage;
+		spin_unlock_irq(&cpu_rq(i)->lock);
+	}
+
+	return totalcpuusage;
+}
+
+static struct cftype files[] = {
+	{
+		.name = "usage",
+		.read_uint = cpuusage_read,
+	},
+};
+
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+	return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+}
+
+/*
+ * charge this task's execution time to its accounting group.
+ *
+ * called with rq->lock held.
+ */
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+{
+	struct cpuacct *ca;
+
+	if (!cpuacct_subsys.active)
+		return;
+
+	ca = task_ca(tsk);
+	if (ca) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
+
+		*cpuusage += cputime;
+	}
+}
+
+struct cgroup_subsys cpuacct_subsys = {
+	.name = "cpuacct",
+	.create = cpuacct_create,
+	.destroy = cpuacct_destroy,
+	.populate = cpuacct_populate,
+	.subsys_id = cpuacct_subsys_id,
+};
+#endif	/* CONFIG_CGROUP_CPUACCT */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2f16e15c022c..37bb265598db 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -351,6 +351,12 @@ static void update_curr(struct cfs_rq *cfs_rq)
 
 	__update_curr(cfs_rq, curr, delta_exec);
 	curr->exec_start = now;
+
+	if (entity_is_task(curr)) {
+		struct task_struct *curtask = task_of(curr);
+
+		cpuacct_charge(curtask, delta_exec);
+	}
 }
 
 static inline void
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8abd752a0ebd..ee9c8b6529e9 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -23,6 +23,7 @@ static void update_curr_rt(struct rq *rq)
 
 	curr->se.sum_exec_runtime += delta_exec;
 	curr->se.exec_start = rq->clock;
+	cpuacct_charge(curr, delta_exec);
 }
 
 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
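
A note on the design: cpuacct_charge() is called from the hot scheduler paths (update_curr() and update_curr_rt()) with rq->lock already held, and it only increments the counter belonging to the task's current CPU, so the write side stays CPU-local and takes no lock of its own. The read side pays instead: cpuusage_read() walks every possible CPU and takes that CPU's rq->lock before loading the u64, since a plain 64-bit load is not atomic on 32-bit platforms and could otherwise observe a torn value mid-update. The following userspace analogue illustrates the same split, with a pthread mutex standing in for rq->lock; every name in it (NCPU, charge, read_total) is invented for this sketch and is not kernel API:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NCPU 4	/* stand-in for the set of possible CPUs */

static struct {
	pthread_mutex_t lock;	/* stands in for that CPU's rq->lock */
	uint64_t usage;		/* stands in for the per-cpu u64 counter */
} counter[NCPU];

/*
 * Hot path: bump only the local counter. In the kernel the caller
 * already holds rq->lock, which is what makes the += safe; this
 * analogue takes the lock itself.
 */
static void charge(int cpu, uint64_t delta_ns)
{
	pthread_mutex_lock(&counter[cpu].lock);
	counter[cpu].usage += delta_ns;
	pthread_mutex_unlock(&counter[cpu].lock);
}

/*
 * Slow path: sum all counters under each counter's own lock so a
 * 32-bit reader can never see a half-written 64-bit value.
 */
static uint64_t read_total(void)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < NCPU; i++) {
		pthread_mutex_lock(&counter[i].lock);
		total += counter[i].usage;
		pthread_mutex_unlock(&counter[i].lock);
	}
	return total;
}

int main(void)
{
	int i;

	for (i = 0; i < NCPU; i++)
		pthread_mutex_init(&counter[i].lock, NULL);

	charge(0, 1000);	/* 1 us charged on "cpu" 0 */
	charge(1, 2500);	/* 2.5 us charged on "cpu" 1 */
	printf("total: %llu ns\n", (unsigned long long)read_total());
	return 0;
}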