| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2013-08-14 22:21:16 +0200 |
| --- | --- | --- |
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2013-08-14 22:21:16 +0200 |
| commit | c49a089c3eedbc4b3fa9c3d469599a6c14dea4c5 (patch) | |
| tree | 9f21ef73bb2d3b8fad642158c6ffa6e33ccabee3 /drivers/cpufreq/cpufreq_ondemand.c | |
| parent | d4e4ab86bcba5a72779c43dc1459f71fea3d89c8 (diff) | |
| parent | 3de9bdeb28638e164d1f0eb38dd68e3f5d2ac95c (diff) | |
Merge back earlier 'pm-cpufreq' material
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 63 |
1 file changed, 14 insertions, 49 deletions
```diff
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c087347d6688..87f3305e80a6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -12,28 +12,16 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/cpu.h>
 #include <linux/percpu-defs.h>
 #include <linux/slab.h>
-#include <linux/sysfs.h>
 #include <linux/tick.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-
 #include "cpufreq_governor.h"
 
 /* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
@@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void)
 	}
 }
 
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 {
-	struct dbs_data *dbs_data = p->governor_data;
+	struct dbs_data *dbs_data = policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
 	if (od_tuners->powersave_bias)
-		freq = od_ops.powersave_bias_target(p, freq,
+		freq = od_ops.powersave_bias_target(policy, freq,
 				CPUFREQ_RELATION_H);
-	else if (p->cur == p->max)
+	else if (policy->cur == policy->max)
 		return;
 
-	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
+	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
 			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
 }
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
  */
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,29 +162,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	dbs_info->freq_lo = 0;
 
 	/* Check for frequency increase */
-	if (load_freq > od_tuners->up_threshold * policy->cur) {
+	if (load > od_tuners->up_threshold) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
 			dbs_info->rate_mult =
 				od_tuners->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 		return;
-	}
-
-	/* Check for frequency decrease */
-	/* if we cannot reduce the frequency anymore, break out early */
-	if (policy->cur == policy->min)
-		return;
-
-	/*
-	 * The optimal frequency is the frequency that is the lowest that can
-	 * support the current CPU usage without triggering the up policy. To be
-	 * safe, we focus 10 points under the threshold.
-	 */
-	if (load_freq < od_tuners->adj_up_threshold
-			* policy->cur) {
+	} else {
+		/* Calculate the next frequency proportional to load */
 		unsigned int freq_next;
-		freq_next = load_freq / od_tuners->adj_up_threshold;
+		freq_next = load * policy->cpuinfo.max_freq / 100;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
@@ -374,9 +346,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		return -EINVAL;
 	}
-	/* Calculate the new adj_up_threshold */
-	od_tuners->adj_up_threshold += input;
-	od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
 	od_tuners->up_threshold = input;
 	return count;
@@ -513,7 +482,7 @@ static int od_init(struct dbs_data *dbs_data)
 	u64 idle_time;
 	int cpu;
 
-	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
 		return -ENOMEM;
@@ -525,8 +494,6 @@ static int od_init(struct dbs_data *dbs_data)
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
 		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
 		 * not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +502,6 @@ static int od_init(struct dbs_data *dbs_data)
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
 		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-			DEF_FREQUENCY_DOWN_DIFFERENTIAL;
 		/* For correct statistics, we need 10 ticks for each measure */
 		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
```
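The heart of the merged material is the simplification of od_check_cpu(): the old code compared load_freq (load pre-scaled by policy->cur) against up_threshold * policy->cur, and used a separate adj_up_threshold (up_threshold minus a 3- or 10-point "down differential") to decide on decreases; the new code compares the raw load (0..100) against up_threshold and otherwise sets the target directly proportional to load against cpuinfo.max_freq. Below is a minimal, standalone user-space sketch of that rule, not kernel code: the next_freq() helper, the example policy limits, and the explicit lower clamp are illustrative assumptions (the real governor's bounds checking and its final frequency-table lookup sit outside the visible hunks).

```c
/*
 * Standalone sketch of the new "proportional to load" ondemand rule.
 * Only the two lines echoing the patch comments mirror the kernel change;
 * everything else here is hypothetical scaffolding for illustration.
 */
#include <stdio.h>

#define UP_THRESHOLD 95	/* MICRO_FREQUENCY_UP_THRESHOLD from the patch */

/* load is 0..100; frequencies are in kHz, as in cpufreq. */
static unsigned int next_freq(unsigned int load, unsigned int min_freq,
			      unsigned int max_freq)
{
	if (load > UP_THRESHOLD)
		return max_freq;	/* busy: jump straight to the maximum */

	/* Calculate the next frequency proportional to load, as in the patch */
	unsigned int freq_next = load * max_freq / 100;

	/* assumed lower clamp; the real governor bounds freq_next elsewhere */
	return freq_next < min_freq ? min_freq : freq_next;
}

int main(void)
{
	/* hypothetical policy limits: 800 MHz .. 2.4 GHz */
	const unsigned int min = 800000, max = 2400000;

	printf("load 50%% -> %u kHz\n", next_freq(50, min, max)); /* 1200000 */
	printf("load 97%% -> %u kHz\n", next_freq(97, min, max)); /* 2400000 */
	printf("load 10%% -> %u kHz\n", next_freq(10, min, max)); /* 800000  */
	return 0;
}
```

At 50% load on a hypothetical 2.4 GHz part this asks for 1.2 GHz regardless of the current frequency. Because the decision no longer depends on a threshold band around policy->cur, the down differential has no role left, which is why the patch deletes adj_up_threshold and both *_DOWN_DIFFERENTIAL macros from store_up_threshold() and od_init().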