author     Glauber Costa <glommer@parallels.com>        2011-11-28 14:45:17 -0200
committer  Ingo Molnar <mingo@elte.hu>                  2011-12-06 09:06:38 +0100
commit     3292beb340c76884427faa1f5d6085719477d889 (patch)
tree       cb7e431b2a15fa66ef5278d485131bac7a125fbd /drivers
parent     786d6dc7aeb2bfbfe417507b7beb83919f319db3 (diff)
sched/accounting: Change cpustat fields to an array
This patch changes the fields in cpustat from a structure to a u64 array.
The math gets easier, and the code becomes more flexible.
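
For context: the kcpustat_cpu() accessor and the CPUTIME_* indices used in the
hunks below come from the new array-based layout introduced elsewhere in this
series (in include/linux/kernel_stat.h), which this drivers-limited diff does
not show. A rough, illustrative sketch of that layout (simplified, not the
verbatim definition):

    /* Simplified sketch of the array-based per-CPU accounting layout. */
    enum cpu_usage_stat {
    	CPUTIME_USER,
    	CPUTIME_NICE,
    	CPUTIME_SYSTEM,
    	CPUTIME_SOFTIRQ,
    	CPUTIME_IRQ,
    	CPUTIME_IDLE,
    	CPUTIME_IOWAIT,
    	CPUTIME_STEAL,
    	NR_STATS,	/* sentinel; the real enum also carries guest-time entries */
    };

    struct kernel_cpustat {
    	u64 cpustat[NR_STATS];
    };

    /* Per-CPU accessor used by the converted call sites below. */
    #define kcpustat_cpu(cpu)	per_cpu(kernel_cpustat, cpu)
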
Signed-off-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1322498719-2255-2-git-send-email-glommer@parallels.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 38
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     | 38
-rw-r--r--  drivers/macintosh/rack-meter.c         |  8
3 files changed, 40 insertions(+), 44 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f7..118bff73fed3 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@ static struct dbs_tuners {
 	.freq_step = 5,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-						  cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
+	u64 idle_time;
 	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] +
+		    kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
@@ -362,11 +361,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					 j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4ebb1d6..f3d327cee43f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-						  cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
+	u64 idle_time;
 	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] +
+		    kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
 	}
 	return count;
@@ -455,11 +454,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					 j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->cpu = cpu;
 		this_dbs_info->rate_mult = 1;
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2637c139777b..66d7f1c7baa1 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice;
  */
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-	cputime64_t retval;
+	u64 retval;
 
-	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-			kstat_cpu(cpu).cpustat.iowait);
+	retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+		 kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 
 	if (rackmeter_ignore_nice)
-		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+		retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
 	return retval;
 }
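
Side note on the "math gets easier" point from the changelog: with the counters
stored as a plain u64 array, summations like the governors' busy-time
calculation above can be written as ordinary arithmetic or a small loop instead
of chained cputime64_add() calls. A hypothetical sketch (not part of this
patch; sum_cpustat() and busy_idx[] are made-up names for illustration):

    #include <linux/kernel.h>
    #include <linux/kernel_stat.h>

    /* Indices that make up "busy" time in the governors above. */
    static const int busy_idx[] = {
    	CPUTIME_USER, CPUTIME_SYSTEM, CPUTIME_IRQ,
    	CPUTIME_SOFTIRQ, CPUTIME_STEAL, CPUTIME_NICE,
    };

    /* Sum the selected per-CPU counters; plain u64 math, no cputime64_* helpers. */
    static u64 sum_cpustat(unsigned int cpu)
    {
    	u64 sum = 0;
    	unsigned int i;

    	for (i = 0; i < ARRAY_SIZE(busy_idx); i++)
    		sum += kcpustat_cpu(cpu).cpustat[busy_idx[i]];

    	return sum;
    }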