Diffstat (limited to 'drivers/cpufreq')
46 files changed, 1844 insertions, 1323 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 56c31a78c692..3858d86cf409 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,6 +19,18 @@ config ACPI_CPPC_CPUFREQ
 
 	  If in doubt, say N.
 
+config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
+	tristate "Allwinner nvmem based SUN50I CPUFreq driver"
+	depends on ARCH_SUNXI
+	depends on NVMEM_SUNXI_SID
+	select PM_OPP
+	help
+	  This adds the nvmem based CPUFreq driver for Allwinner
+	  h6 SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sun50i-cpufreq-nvmem.
+
 config ARM_ARMADA_37XX_CPUFREQ
 	tristate "Armada 37xx CPUFreq support"
 	depends on ARCH_MVEBU && CPUFREQ_DT
@@ -37,14 +49,6 @@ config ARM_ARMADA_8K_CPUFREQ
 
 	  If in doubt, say N.
 
-# big LITTLE core layer and glue drivers
-config ARM_BIG_LITTLE_CPUFREQ
-	tristate "Generic ARM big LITTLE CPUfreq driver"
-	depends on ARM_CPU_TOPOLOGY && HAVE_CLK
-	select PM_OPP
-	help
-	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
-
 config ARM_SCPI_CPUFREQ
 	tristate "SCPI based CPUfreq driver"
 	depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
@@ -57,7 +61,9 @@ config ARM_SCPI_CPUFREQ
 
 config ARM_VEXPRESS_SPC_CPUFREQ
 	tristate "Versatile Express SPC based CPUfreq driver"
-	depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+	depends on ARM_CPU_TOPOLOGY && HAVE_CLK
+	depends on ARCH_VEXPRESS_SPC
+	select PM_OPP
 	help
 	  This add the CPUfreq driver support for Versatile Express
 	  big.LITTLE platforms using SPC for power management.
@@ -120,8 +126,8 @@ config ARM_OMAP2PLUS_CPUFREQ
 	depends on ARCH_OMAP2PLUS
 	default ARCH_OMAP2PLUS
 
-config ARM_QCOM_CPUFREQ_KRYO
-	tristate "Qualcomm Kryo based CPUFreq"
+config ARM_QCOM_CPUFREQ_NVMEM
+	tristate "Qualcomm nvmem based CPUFreq"
 	depends on ARM64
 	depends on QCOM_QFPROM
 	depends on QCOM_SMEM
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 35b4f700f054..58151ca56695 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -48,9 +48,9 @@ config PPC_PASEMI_CPUFREQ
 	  PWRficient processors.
 
 config POWERNV_CPUFREQ
-       tristate "CPU frequency scaling for IBM POWERNV platform"
-       depends on PPC_POWERNV
-       default y
-       help
+	tristate "CPU frequency scaling for IBM POWERNV platform"
+	depends on PPC_POWERNV
+	default y
+	help
 	 This adds support for CPU frequency switching on IBM POWERNV
 	 platform
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index dfa6457deaf6..a6528388952e 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -4,17 +4,17 @@
 #
 
 config X86_INTEL_PSTATE
-       bool "Intel P state control"
-       depends on X86
-       select ACPI_PROCESSOR if ACPI
-       select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
-       help
-         This driver provides a P state for Intel core processors.
+	bool "Intel P state control"
+	depends on X86
+	select ACPI_PROCESSOR if ACPI
+	select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
+	help
+	  This driver provides a P state for Intel core processors.
 	The driver implements an internal governor and will become
-        the scaling driver and governor for Sandy bridge processors.
+	the scaling driver and governor for Sandy bridge processors.
 
 	When this driver is enabled it will become the preferred
-        scaling driver for Sandy bridge processors.
+	scaling driver for Sandy bridge processors.
 
 	If in doubt, say N.
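For context, the "nvmem based" drivers referenced above (sun50i-cpufreq-nvmem, qcom-cpufreq-nvmem, and imx-cpufreq-dt further down) follow the same basic pattern: read a speed-bin fuse through the nvmem consumer API, then restrict the OPP table to the matching entries. A minimal sketch of that pattern, assuming a DT nvmem cell named "speed" and one supported-hw bit per grade (both illustrative, not taken from any of these drivers):

#include <linux/nvmem-consumer.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>

/* Illustrative only: read a speed-bin efuse and limit the OPPs to match. */
static int example_set_supported_hw(struct device *cpu_dev)
{
	struct nvmem_cell *cell;
	u32 supported_hw;
	size_t len;
	u8 *bin;

	cell = nvmem_cell_get(cpu_dev, "speed");	/* cell name is an assumption */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	bin = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(bin))
		return PTR_ERR(bin);

	supported_hw = BIT(*bin);	/* one bit per speed grade */
	kfree(bin);

	/* OPP entries carry a matching "opp-supported-hw" mask in DT. */
	return PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(cpu_dev,
							   &supported_hw, 1));
}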
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 5a6c70d26c98..f6670c4abbb0 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_SFI_CPUFREQ)		+= sfi-cpufreq.o
 
 ##################################################################################
 # ARM SoC drivers
-obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)	+= arm_big_little.o
-
 obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ)	+= armada-37xx-cpufreq.o
 obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ)	+= armada-8k-cpufreq.o
 obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)	+= brcmstb-avs-cpufreq.o
@@ -64,7 +62,7 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
 obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW)	+= qcom-cpufreq-hw.o
-obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO)	+= qcom-cpufreq-kryo.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM)	+= qcom-cpufreq-nvmem.o
 obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ)	+= raspberrypi-cpufreq.o
 obj-$(CONFIG_ARM_S3C2410_CPUFREQ)	+= s3c2410-cpufreq.o
 obj-$(CONFIG_ARM_S3C2412_CPUFREQ)	+= s3c2412-cpufreq.o
@@ -80,6 +78,7 @@ obj-$(CONFIG_ARM_SCMI_CPUFREQ)		+= scmi-cpufreq.o
 obj-$(CONFIG_ARM_SCPI_CPUFREQ)		+= scpi-cpufreq.o
 obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
 obj-$(CONFIG_ARM_STI_CPUFREQ)		+= sti-cpufreq.o
+obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
 obj-$(CONFIG_ARM_TANGO_CPUFREQ)		+= tango-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA20_CPUFREQ)	+= tegra20-cpufreq.o
 obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)	+= tegra124-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
deleted file mode 100644
index 7fe52fcddcf1..000000000000
--- a/drivers/cpufreq/arm_big_little.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * ARM big.LITTLE Platforms CPUFreq support
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/topology.h>
-#include <linux/types.h>
-
-#include "arm_big_little.h"
-
-/* Currently we support only two clusters */
-#define A15_CLUSTER	0
-#define A7_CLUSTER	1
-#define MAX_CLUSTERS	2
-
-#ifdef CONFIG_BL_SWITCHER
-#include <asm/bL_switcher.h>
-static bool bL_switching_enabled;
-#define is_bL_switching_enabled()	bL_switching_enabled
-#define set_switching_enabled(x)	(bL_switching_enabled = (x))
-#else
-#define is_bL_switching_enabled()	false
-#define set_switching_enabled(x)	do { } while (0)
-#define bL_switch_request(...)		do { } while (0)
-#define bL_switcher_put_enabled()	do { } while (0)
-#define bL_switcher_get_enabled()	do { } while (0)
-#endif
-
-#define ACTUAL_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq << 1 : freq)
-#define VIRT_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
-static const struct cpufreq_arm_bL_ops *arm_bL_ops;
-static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
-static atomic_t cluster_usage[MAX_CLUSTERS + 1];
-
-static unsigned int clk_big_min;	/* (Big) clock frequencies */
-static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */
-
-static DEFINE_PER_CPU(unsigned int, physical_cluster);
-static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-
-static struct mutex cluster_lock[MAX_CLUSTERS];
-
-static inline int raw_cpu_to_cluster(int cpu)
-{
-	return topology_physical_package_id(cpu);
-}
-
-static inline int cpu_to_cluster(int cpu)
-{
-	return is_bL_switching_enabled() ?
-		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
-}
-
-static unsigned int find_cluster_maxfreq(int cluster)
-{
-	int j;
-	u32 max_freq = 0, cpu_freq;
-
-	for_each_online_cpu(j) {
-		cpu_freq = per_cpu(cpu_last_req_freq, j);
-
-		if ((cluster == per_cpu(physical_cluster, j)) &&
-		    (max_freq < cpu_freq))
-			max_freq = cpu_freq;
-	}
-
-	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
-			max_freq);
-
-	return max_freq;
-}
-
-static unsigned int clk_get_cpu_rate(unsigned int cpu)
-{
-	u32 cur_cluster = per_cpu(physical_cluster, cpu);
-	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
-
-	/* For switcher we use virtual A7 clock rates */
-	if (is_bL_switching_enabled())
-		rate = VIRT_FREQ(cur_cluster, rate);
-
-	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
-			cur_cluster, rate);
-
-	return rate;
-}
-
-static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
-{
-	if (is_bL_switching_enabled()) {
-		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
-					cpu));
-
-		return per_cpu(cpu_last_req_freq, cpu);
-	} else {
-		return clk_get_cpu_rate(cpu);
-	}
-}
-
-static unsigned int
-bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
-{
-	u32 new_rate, prev_rate;
-	int ret;
-	bool bLs = is_bL_switching_enabled();
-
-	mutex_lock(&cluster_lock[new_cluster]);
-
-	if (bLs) {
-		prev_rate = per_cpu(cpu_last_req_freq, cpu);
-		per_cpu(cpu_last_req_freq, cpu) = rate;
-		per_cpu(physical_cluster, cpu) = new_cluster;
-
-		new_rate = find_cluster_maxfreq(new_cluster);
-		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
-	} else {
-		new_rate = rate;
-	}
-
-	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
-			__func__, cpu, old_cluster, new_cluster, new_rate);
-
-	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
-	if (!ret) {
-		/*
-		 * FIXME: clk_set_rate hasn't returned an error here however it
-		 * may be that clk_change_rate failed due to hardware or
-		 * firmware issues and wasn't able to report that due to the
-		 * current design of the clk core layer. To work around this
-		 * problem we will read back the clock rate and check it is
-		 * correct. This needs to be removed once clk core is fixed.
-		 */
-		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
-			ret = -EIO;
-	}
-
-	if (WARN_ON(ret)) {
-		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
-				new_cluster);
-		if (bLs) {
-			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
-			per_cpu(physical_cluster, cpu) = old_cluster;
-		}
-
-		mutex_unlock(&cluster_lock[new_cluster]);
-
-		return ret;
-	}
-
-	mutex_unlock(&cluster_lock[new_cluster]);
-
-	/* Recalc freq for old cluster when switching clusters */
-	if (old_cluster != new_cluster) {
-		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
-				__func__, cpu, old_cluster, new_cluster);
-
-		/* Switch cluster */
-		bL_switch_request(cpu, new_cluster);
-
-		mutex_lock(&cluster_lock[old_cluster]);
-
-		/* Set freq of old cluster if there are cpus left on it */
-		new_rate = find_cluster_maxfreq(old_cluster);
-		new_rate = ACTUAL_FREQ(old_cluster, new_rate);
-
-		if (new_rate) {
-			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
-					__func__, old_cluster, new_rate);
-
-			if (clk_set_rate(clk[old_cluster], new_rate * 1000))
-				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
-						__func__, ret, old_cluster);
-		}
-		mutex_unlock(&cluster_lock[old_cluster]);
-	}
-
-	return 0;
-}
-
-/* Set clock frequency */
-static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
-		unsigned int index)
-{
-	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
-	unsigned int freqs_new;
-	int ret;
-
-	cur_cluster = cpu_to_cluster(cpu);
-	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
-
-	freqs_new = freq_table[cur_cluster][index].frequency;
-
-	if (is_bL_switching_enabled()) {
-		if ((actual_cluster == A15_CLUSTER) &&
-		    (freqs_new < clk_big_min)) {
-			new_cluster = A7_CLUSTER;
-		} else if ((actual_cluster == A7_CLUSTER) &&
-			   (freqs_new > clk_little_max)) {
-			new_cluster = A15_CLUSTER;
-		}
-	}
-
-	ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
-
-	if (!ret) {
-		arch_set_freq_scale(policy->related_cpus, freqs_new,
-				    policy->cpuinfo.max_freq);
-	}
-
-	return ret;
-}
-
-static inline u32 get_table_count(struct cpufreq_frequency_table *table)
-{
-	int count;
-
-	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
-		;
-
-	return count;
-}
-
-/* get the minimum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_min(struct cpufreq_frequency_table *table)
-{
-	struct cpufreq_frequency_table *pos;
-	uint32_t min_freq = ~0;
-	cpufreq_for_each_entry(pos, table)
-		if (pos->frequency < min_freq)
-			min_freq = pos->frequency;
-	return min_freq;
-}
-
-/* get the maximum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_max(struct cpufreq_frequency_table *table)
-{
-	struct cpufreq_frequency_table *pos;
-	uint32_t max_freq = 0;
-	cpufreq_for_each_entry(pos, table)
-		if (pos->frequency > max_freq)
-			max_freq = pos->frequency;
-	return max_freq;
-}
-
-static int merge_cluster_tables(void)
-{
-	int i, j, k = 0, count = 1;
-	struct cpufreq_frequency_table *table;
-
-	for (i = 0; i < MAX_CLUSTERS; i++)
-		count += get_table_count(freq_table[i]);
-
-	table = kcalloc(count, sizeof(*table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
-	freq_table[MAX_CLUSTERS] = table;
-
-	/* Add in reverse order to get freqs in increasing order */
-	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
-		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
-				j++) {
-			table[k].frequency = VIRT_FREQ(i,
-					freq_table[i][j].frequency);
-			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
-					table[k].frequency);
-			k++;
-		}
-	}
-
-	table[k].driver_data = k;
-	table[k].frequency = CPUFREQ_TABLE_END;
-
-	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
-
-	return 0;
-}
-
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
-					    const struct cpumask *cpumask)
-{
-	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-
-	if (!freq_table[cluster])
-		return;
-
-	clk_put(clk[cluster]);
-	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-	if (arm_bL_ops->free_opp_table)
-		arm_bL_ops->free_opp_table(cpumask);
-	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
-}
-
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
-					   const struct cpumask *cpumask)
-{
-	u32 cluster = cpu_to_cluster(cpu_dev->id);
-	int i;
-
-	if (atomic_dec_return(&cluster_usage[cluster]))
-		return;
-
-	if (cluster < MAX_CLUSTERS)
-		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
-
-	for_each_present_cpu(i) {
-		struct device *cdev = get_cpu_device(i);
-		if (!cdev) {
-			pr_err("%s: failed to get cpu%d device\n", __func__, i);
-			return;
-		}
-
-		_put_cluster_clk_and_freq_table(cdev, cpumask);
-	}
-
-	/* free virtual table */
-	kfree(freq_table[cluster]);
-}
-
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
-					   const struct cpumask *cpumask)
-{
-	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-	int ret;
-
-	if (freq_table[cluster])
-		return 0;
-
-	ret = arm_bL_ops->init_opp_table(cpumask);
-	if (ret) {
-		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
-				__func__, cpu_dev->id, ret);
-		goto out;
-	}
-
-	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
-	if (ret) {
-		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
-				__func__, cpu_dev->id, ret);
-		goto free_opp_table;
-	}
-
-	clk[cluster] = clk_get(cpu_dev, NULL);
-	if (!IS_ERR(clk[cluster])) {
-		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
-				__func__, clk[cluster], freq_table[cluster],
-				cluster);
-		return 0;
-	}
-
-	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
-			__func__, cpu_dev->id, cluster);
-	ret = PTR_ERR(clk[cluster]);
-	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-
-free_opp_table:
-	if (arm_bL_ops->free_opp_table)
-		arm_bL_ops->free_opp_table(cpumask);
-out:
-	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
-			cluster);
-	return ret;
-}
-
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
-					  const struct cpumask *cpumask)
-{
-	u32 cluster = cpu_to_cluster(cpu_dev->id);
-	int i, ret;
-
-	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
-		return 0;
-
-	if (cluster < MAX_CLUSTERS) {
-		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
-		if (ret)
-			atomic_dec(&cluster_usage[cluster]);
-		return ret;
-	}
-
-	/*
-	 * Get data for all clusters and fill virtual cluster with a merge of
-	 * both
-	 */
-	for_each_present_cpu(i) {
-		struct device *cdev = get_cpu_device(i);
-		if (!cdev) {
-			pr_err("%s: failed to get cpu%d device\n", __func__, i);
-			return -ENODEV;
-		}
-
-		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
-		if (ret)
-			goto put_clusters;
-	}
-
-	ret = merge_cluster_tables();
-	if (ret)
-		goto put_clusters;
-
-	/* Assuming 2 cluster, set clk_big_min and clk_little_max */
-	clk_big_min = get_table_min(freq_table[0]);
-	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
-
-	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
-			__func__, cluster, clk_big_min, clk_little_max);
-
-	return 0;
-
-put_clusters:
-	for_each_present_cpu(i) {
-		struct device *cdev = get_cpu_device(i);
-		if (!cdev) {
-			pr_err("%s: failed to get cpu%d device\n", __func__, i);
-			return -ENODEV;
-		}
-
-		_put_cluster_clk_and_freq_table(cdev, cpumask);
-	}
-
-	atomic_dec(&cluster_usage[cluster]);
-
-	return ret;
-}
-
-/* Per-CPU initialization */
-static int bL_cpufreq_init(struct cpufreq_policy *policy)
-{
-	u32 cur_cluster = cpu_to_cluster(policy->cpu);
-	struct device *cpu_dev;
-	int ret;
-
-	cpu_dev = get_cpu_device(policy->cpu);
-	if (!cpu_dev) {
-		pr_err("%s: failed to get cpu%d device\n", __func__,
-				policy->cpu);
-		return -ENODEV;
-	}
-
-	if (cur_cluster < MAX_CLUSTERS) {
-		int cpu;
-
-		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
-
-		for_each_cpu(cpu, policy->cpus)
-			per_cpu(physical_cluster, cpu) = cur_cluster;
-	} else {
-		/* Assumption: during init, we are always running on A15 */
-		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
-	}
-
-	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
-	if (ret)
-		return ret;
-
-	policy->freq_table = freq_table[cur_cluster];
-	policy->cpuinfo.transition_latency =
-				arm_bL_ops->get_transition_latency(cpu_dev);
-
-	dev_pm_opp_of_register_em(policy->cpus);
-
-	if (is_bL_switching_enabled())
-		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
-
-	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
-	return 0;
-}
-
-static int bL_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	struct device *cpu_dev;
-	int cur_cluster = cpu_to_cluster(policy->cpu);
-
-	if (cur_cluster < MAX_CLUSTERS) {
-		cpufreq_cooling_unregister(cdev[cur_cluster]);
-		cdev[cur_cluster] = NULL;
-	}
-
-	cpu_dev = get_cpu_device(policy->cpu);
-	if (!cpu_dev) {
-		pr_err("%s: failed to get cpu%d device\n", __func__,
-				policy->cpu);
-		return -ENODEV;
-	}
-
-	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
-	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
-
-	return 0;
-}
-
-static void bL_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	int cur_cluster = cpu_to_cluster(policy->cpu);
-
-	/* Do not register a cpu_cooling device if we are in IKS mode */
-	if (cur_cluster >= MAX_CLUSTERS)
-		return;
-
-	cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
-static struct cpufreq_driver bL_cpufreq_driver = {
-	.name			= "arm-big-little",
-	.flags			= CPUFREQ_STICKY |
-					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
-					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-	.verify			= cpufreq_generic_frequency_table_verify,
-	.target_index		= bL_cpufreq_set_target,
-	.get			= bL_cpufreq_get_rate,
-	.init			= bL_cpufreq_init,
-	.exit			= bL_cpufreq_exit,
-	.ready			= bL_cpufreq_ready,
-	.attr			= cpufreq_generic_attr,
-};
-
-#ifdef CONFIG_BL_SWITCHER
-static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
-					unsigned long action, void *_arg)
-{
-	pr_debug("%s: action: %ld\n", __func__, action);
-
-	switch (action) {
-	case BL_NOTIFY_PRE_ENABLE:
-	case BL_NOTIFY_PRE_DISABLE:
-		cpufreq_unregister_driver(&bL_cpufreq_driver);
-		break;
-
-	case BL_NOTIFY_POST_ENABLE:
-		set_switching_enabled(true);
-		cpufreq_register_driver(&bL_cpufreq_driver);
-		break;
-
-	case BL_NOTIFY_POST_DISABLE:
-		set_switching_enabled(false);
-		cpufreq_register_driver(&bL_cpufreq_driver);
-		break;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block bL_switcher_notifier = {
-	.notifier_call = bL_cpufreq_switcher_notifier,
-};
-
-static int __bLs_register_notifier(void)
-{
-	return bL_switcher_register_notifier(&bL_switcher_notifier);
-}
-
-static int __bLs_unregister_notifier(void)
-{
-	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
-}
-#else
-static int __bLs_register_notifier(void) { return 0; }
-static int __bLs_unregister_notifier(void) { return 0; }
-#endif
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
-{
-	int ret, i;
-
-	if (arm_bL_ops) {
-		pr_debug("%s: Already registered: %s, exiting\n", __func__,
-				arm_bL_ops->name);
-		return -EBUSY;
-	}
-
-	if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
-	    !ops->get_transition_latency) {
-		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
-		return -ENODEV;
-	}
-
-	arm_bL_ops = ops;
-
-	set_switching_enabled(bL_switcher_get_enabled());
-
-	for (i = 0; i < MAX_CLUSTERS; i++)
-		mutex_init(&cluster_lock[i]);
-
-	ret = cpufreq_register_driver(&bL_cpufreq_driver);
-	if (ret) {
-		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
-				__func__, ops->name, ret);
-		arm_bL_ops = NULL;
-	} else {
-		ret = __bLs_register_notifier();
-		if (ret) {
-			cpufreq_unregister_driver(&bL_cpufreq_driver);
-			arm_bL_ops = NULL;
-		} else {
-			pr_info("%s: Registered platform driver: %s\n",
-					__func__, ops->name);
-		}
-	}
-
-	bL_switcher_put_enabled();
-	return ret;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_register);
-
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
-{
-	if (arm_bL_ops != ops) {
-		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
-				__func__, arm_bL_ops->name);
-		return;
-	}
-
-	bL_switcher_get_enabled();
-	__bLs_unregister_notifier();
-	cpufreq_unregister_driver(&bL_cpufreq_driver);
-	bL_switcher_put_enabled();
-	pr_info("%s: Un-registered platform driver: %s\n", __func__,
-			arm_bL_ops->name);
-	arm_bL_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
deleted file mode 100644
index 88a176e466c8..000000000000
--- a/drivers/cpufreq/arm_big_little.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ARM big.LITTLE platform's CPUFreq header file
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef CPUFREQ_ARM_BIG_LITTLE_H
-#define CPUFREQ_ARM_BIG_LITTLE_H
-
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct cpufreq_arm_bL_ops {
-	char name[CPUFREQ_NAME_LEN];
-
-	/*
-	 * This must set opp table for cpu_dev in a similar way as done by
-	 * dev_pm_opp_of_add_table().
-	 */
-	int (*init_opp_table)(const struct cpumask *cpumask);
-
-	/* Optional */
-	int (*get_transition_latency)(struct device *cpu_dev);
-	void (*free_opp_table)(const struct cpumask *cpumask);
-};
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
-
-#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 988ebc326bdb..39e34f5066d3 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -136,6 +136,8 @@ static int __init armada_8k_cpufreq_init(void)
 
 	nb_cpus = num_possible_cpus();
 	freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+	if (!freq_tables)
+		return -ENOMEM;
 	cpumask_copy(&cpus, cpu_possible_mask);
 
 	/*
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 77b0e5d0fb13..4f86ce2db34f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	struct private_data *priv = policy->driver_data;
 
+	cpufreq_cpu_put(policy);
+
 	return brcm_avs_get_frequency(priv->base);
 }
 
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8d8da763adc5..bda0b2406fba 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -39,7 +39,7 @@ static struct cppc_cpudata **all_cpu_data;
 
 struct cppc_workaround_oem_info {
-	char oem_id[ACPI_OEM_ID_SIZE +1];
+	char oem_id[ACPI_OEM_ID_SIZE + 1];
 	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
 	u32 oem_revision;
 };
@@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void)
 	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
 		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
 		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
-		    wa_info[i].oem_revision == tbl->oem_revision)
+		    wa_info[i].oem_revision == tbl->oem_revision) {
 			apply_hisi_workaround = true;
+			break;
+		}
 	}
+
+	acpi_put_table(tbl);
 }
 
 /* Callback function used to retrieve the max frequency from DMI */
@@ -217,7 +221,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 	return ret;
 }
 
-static int cppc_verify_policy(struct cpufreq_policy *policy)
+static int cppc_verify_policy(struct cpufreq_policy_data *policy)
 {
 	cpufreq_verify_within_cpu_limits(policy);
 	return 0;
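The acpi_put_table() added above pairs with the acpi_get_table() call made earlier in cppc_check_hisi_workaround(), which takes a reference on the mapped table that was previously never dropped. A minimal sketch of that get/put pattern, assuming a PCCT lookup as in that function (the helper name is illustrative):

#include <linux/acpi.h>

/* Illustrative only: take, use, and drop an ACPI table reference. */
static bool example_match_oem(const char *oem_id)
{
	struct acpi_table_header *tbl;
	acpi_status status;
	bool match;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);	/* takes a reference */
	if (ACPI_FAILURE(status) || !tbl)
		return false;

	match = !memcmp(tbl->oem_id, oem_id, ACPI_OEM_ID_SIZE);

	acpi_put_table(tbl);	/* drop the reference once done with the table */
	return match;
}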
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 03dc4244ab00..f2ae9cd455c1 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -86,7 +86,6 @@ static const struct of_device_id whitelist[] __initconst = {
 	{ .compatible = "st-ericsson,u9540", },
 
 	{ .compatible = "ti,omap2", },
-	{ .compatible = "ti,omap3", },
 	{ .compatible = "ti,omap4", },
 	{ .compatible = "ti,omap5", },
 
@@ -101,12 +100,16 @@ static const struct of_device_id whitelist[] __initconst = {
  * platforms using "operating-points-v2" property.
  */
 static const struct of_device_id blacklist[] __initconst = {
+	{ .compatible = "allwinner,sun50i-h6", },
+
 	{ .compatible = "calxeda,highbank", },
 	{ .compatible = "calxeda,ecx-2000", },
 
 	{ .compatible = "fsl,imx7d", },
 	{ .compatible = "fsl,imx8mq", },
 	{ .compatible = "fsl,imx8mm", },
+	{ .compatible = "fsl,imx8mn", },
+	{ .compatible = "fsl,imx8mp", },
 
 	{ .compatible = "marvell,armadaxp", },
 
@@ -117,12 +120,16 @@ static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "mediatek,mt817x", },
 	{ .compatible = "mediatek,mt8173", },
 	{ .compatible = "mediatek,mt8176", },
+	{ .compatible = "mediatek,mt8183", },
+
+	{ .compatible = "nvidia,tegra20", },
+	{ .compatible = "nvidia,tegra30", },
 	{ .compatible = "nvidia,tegra124", },
 	{ .compatible = "nvidia,tegra210", },
 
 	{ .compatible = "qcom,apq8096", },
 	{ .compatible = "qcom,msm8996", },
+	{ .compatible = "qcom,qcs404", },
 
 	{ .compatible = "st,stih407", },
 	{ .compatible = "st,stih410", },
@@ -132,6 +139,7 @@ static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "ti,am33xx", },
 	{ .compatible = "ti,am43", },
 	{ .compatible = "ti,dra7", },
+	{ .compatible = "ti,omap3", },
 
 	{ }
 };
@@ -175,4 +183,4 @@ create_pdev:
 			       -1, data,
 			       sizeof(struct cpufreq_dt_platform_data)));
 }
-device_initcall(cpufreq_dt_platdev_init);
+core_initcall(cpufreq_dt_platdev_init);
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index cd53272e2fa2..f7a7bcf6f52e 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -291,7 +291,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
  * nforce2_verify - verifies a new CPUFreq policy
  * @policy: new policy
  */
-static int nforce2_verify(struct cpufreq_policy *policy)
+static int nforce2_verify(struct cpufreq_policy_data *policy)
 {
 	unsigned int fsb_pol_max;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c28ebf2810f1..cbe6c94bf158 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,6 +74,9 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
 static void cpufreq_stop_governor(struct cpufreq_policy *policy);
 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+			      struct cpufreq_governor *new_gov,
+			      unsigned int new_pol);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -102,6 +105,8 @@ bool have_governor_per_policy(void)
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
+static struct kobject *cpufreq_global_kobject;
+
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 {
 	if (have_governor_per_policy())
@@ -113,18 +118,21 @@ EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 
 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	u64 idle_time;
+	struct kernel_cpustat kcpustat;
 	u64 cur_wall_time;
+	u64 idle_time;
 	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
 
-	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+	kcpustat_cpu_fetch(&kcpustat, cpu);
+
+	busy_time = kcpustat.cpustat[CPUTIME_USER];
+	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat.cpustat[CPUTIME_NICE];
 
 	idle_time = cur_wall_time - busy_time;
 	if (wall)
@@ -613,25 +621,22 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
 	return NULL;
 }
 
-static int cpufreq_parse_policy(char *str_governor,
-				struct cpufreq_policy *policy)
+static unsigned int cpufreq_parse_policy(char *str_governor)
 {
-	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
-		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
-		return 0;
-	}
-	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
-		policy->policy = CPUFREQ_POLICY_POWERSAVE;
-		return 0;
-	}
-	return -EINVAL;
+	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+		return CPUFREQ_POLICY_PERFORMANCE;
+
+	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+		return CPUFREQ_POLICY_POWERSAVE;
+
+	return CPUFREQ_POLICY_UNKNOWN;
 }
 
 /**
  * cpufreq_parse_governor - parse a governor string only for has_target()
+ * @str_governor: Governor name.
  */
-static int cpufreq_parse_governor(char *str_governor,
-				  struct cpufreq_policy *policy)
+static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
 {
 	struct cpufreq_governor *t;
 
@@ -645,7 +650,7 @@ static int cpufreq_parse_governor(char *str_governor,
 
 		ret = request_module("cpufreq_%s", str_governor);
 		if (ret)
-			return -EINVAL;
+			return NULL;
 
 		mutex_lock(&cpufreq_governor_mutex);
 
@@ -656,12 +661,7 @@ static int cpufreq_parse_governor(char *str_governor,
 
 	mutex_unlock(&cpufreq_governor_mutex);
 
-	if (t) {
-		policy->governor = t;
-		return 0;
-	}
-
-	return -EINVAL;
+	return t;
 }
 
 /**
@@ -720,7 +720,7 @@ static ssize_t store_##file_name					\
 	if (ret != 1)							\
 		return -EINVAL;						\
 									\
-	ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+	ret = freq_qos_update_request(policy->object##_freq_req, val);\
 	return ret >= 0 ? count : ret;					\
 }
 
@@ -762,28 +762,33 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 					const char *buf, size_t count)
 {
+	char str_governor[16];
 	int ret;
-	char	str_governor[16];
-	struct cpufreq_policy new_policy;
-
-	memcpy(&new_policy, policy, sizeof(*policy));
 
 	ret = sscanf(buf, "%15s", str_governor);
 	if (ret != 1)
 		return -EINVAL;
 
 	if (cpufreq_driver->setpolicy) {
-		if (cpufreq_parse_policy(str_governor, &new_policy))
+		unsigned int new_pol;
+
+		new_pol = cpufreq_parse_policy(str_governor);
+		if (!new_pol)
 			return -EINVAL;
+
+		ret = cpufreq_set_policy(policy, NULL, new_pol);
 	} else {
-		if (cpufreq_parse_governor(str_governor, &new_policy))
+		struct cpufreq_governor *new_gov;
+
+		new_gov = cpufreq_parse_governor(str_governor);
+		if (!new_gov)
 			return -EINVAL;
-	}
 
-	ret = cpufreq_set_policy(policy, &new_policy);
+		ret = cpufreq_set_policy(policy, new_gov,
+					  CPUFREQ_POLICY_UNKNOWN);
 
-	if (new_policy.governor)
-		module_put(new_policy.governor->owner);
+		module_put(new_gov->owner);
+	}
 
 	return ret ? ret : count;
 }
@@ -933,6 +938,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 	struct freq_attr *fattr = to_attr(attr);
 	ssize_t ret;
 
+	if (!fattr->show)
+		return -EIO;
+
 	down_read(&policy->rwsem);
 	ret = fattr->show(policy, buf);
 	up_read(&policy->rwsem);
@@ -947,6 +955,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	struct freq_attr *fattr = to_attr(attr);
 	ssize_t ret = -EINVAL;
 
+	if (!fattr->store)
+		return -EIO;
+
 	/*
 	 * cpus_read_trylock() is used here to work around a circular lock
 	 * dependency problem with respect to the cpufreq_register_driver().
@@ -1044,40 +1055,33 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
 
 static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
-	struct cpufreq_governor *gov = NULL, *def_gov = NULL;
-	struct cpufreq_policy new_policy;
-
-	memcpy(&new_policy, policy, sizeof(*policy));
-
-	def_gov = cpufreq_default_governor();
+	struct cpufreq_governor *def_gov = cpufreq_default_governor();
+	struct cpufreq_governor *gov = NULL;
+	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
 
 	if (has_target()) {
-		/*
-		 * Update governor of new_policy to the governor used before
-		 * hotplug
-		 */
+		/* Update policy governor to the one used before hotplug. */
 		gov = find_governor(policy->last_governor);
 		if (gov) {
 			pr_debug("Restoring governor %s for cpu %d\n",
-				policy->governor->name, policy->cpu);
-		} else {
-			if (!def_gov)
-				return -ENODATA;
+				 policy->governor->name, policy->cpu);
+		} else if (def_gov) {
 			gov = def_gov;
+		} else {
+			return -ENODATA;
 		}
-		new_policy.governor = gov;
 	} else {
 		/* Use the default policy if there is no last_policy. */
 		if (policy->last_policy) {
-			new_policy.policy = policy->last_policy;
+			pol = policy->last_policy;
+		} else if (def_gov) {
+			pol = cpufreq_parse_policy(def_gov->name);
 		} else {
-			if (!def_gov)
-				return -ENODATA;
-			cpufreq_parse_policy(def_gov->name, &new_policy);
+			return -ENODATA;
 		}
 	}
 
-	return cpufreq_set_policy(policy, &new_policy);
+	return cpufreq_set_policy(policy, gov, pol);
 }
 
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1105,13 +1109,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
 
 void refresh_frequency_limits(struct cpufreq_policy *policy)
 {
-	struct cpufreq_policy new_policy;
-
 	if (!policy_is_inactive(policy)) {
-		new_policy = *policy;
 		pr_debug("updating policy for CPU %u\n", policy->cpu);
 
-		cpufreq_set_policy(policy, &new_policy);
+		cpufreq_set_policy(policy, policy->governor, policy->policy);
 	}
 }
 EXPORT_SYMBOL(refresh_frequency_limits);
@@ -1202,19 +1203,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 		goto err_free_real_cpus;
 	}
 
+	freq_constraints_init(&policy->constraints);
+
 	policy->nb_min.notifier_call = cpufreq_notifier_min;
 	policy->nb_max.notifier_call = cpufreq_notifier_max;
 
-	ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
-				      DEV_PM_QOS_MIN_FREQUENCY);
+	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+				    &policy->nb_min);
 	if (ret) {
 		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
 			ret, cpumask_pr_args(policy->cpus));
 		goto err_kobj_remove;
 	}
 
-	ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
-				      DEV_PM_QOS_MAX_FREQUENCY);
+	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+				    &policy->nb_max);
 	if (ret) {
 		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
 			ret, cpumask_pr_args(policy->cpus));
@@ -1232,8 +1235,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	return policy;
 
 err_min_qos_notifier:
-	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-				   DEV_PM_QOS_MIN_FREQUENCY);
+	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+				 &policy->nb_min);
 err_kobj_remove:
 	cpufreq_policy_put_kobj(policy);
 err_free_real_cpus:
@@ -1250,7 +1253,6 @@ err_free_policy:
 
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
-	struct device *dev = get_cpu_device(policy->cpu);
 	unsigned long flags;
 	int cpu;
 
@@ -1262,12 +1264,25 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	dev_pm_qos_remove_notifier(dev, &policy->nb_max,
-				   DEV_PM_QOS_MAX_FREQUENCY);
-	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-				   DEV_PM_QOS_MIN_FREQUENCY);
-	dev_pm_qos_remove_request(policy->max_freq_req);
-	dev_pm_qos_remove_request(policy->min_freq_req);
+	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+				 &policy->nb_max);
+	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+				 &policy->nb_min);
+
+	/* Cancel any pending policy->update work before freeing the policy. */
+	cancel_work_sync(&policy->update);
+
+	if (policy->max_freq_req) {
+		/*
+		 * CPUFREQ_CREATE_POLICY notification is sent only after
+		 * successfully adding max_freq_req request.
+		 */
+		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+					     CPUFREQ_REMOVE_POLICY, policy);
+		freq_qos_remove_request(policy->max_freq_req);
+	}
+
+	freq_qos_remove_request(policy->min_freq_req);
 	kfree(policy->min_freq_req);
 
 	cpufreq_policy_put_kobj(policy);
@@ -1347,8 +1362,6 @@ static int cpufreq_online(unsigned int cpu)
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
 	if (new_policy) {
-		struct device *dev = get_cpu_device(cpu);
-
 		for_each_cpu(j, policy->related_cpus) {
 			per_cpu(cpufreq_cpu_data, j) = policy;
 			add_cpu_dev_symlink(policy, j);
@@ -1359,38 +1372,36 @@ static int cpufreq_online(unsigned int cpu)
 		if (!policy->min_freq_req)
 			goto out_destroy_policy;
 
-		ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
-					     DEV_PM_QOS_MIN_FREQUENCY,
-					     policy->min);
+		ret = freq_qos_add_request(&policy->constraints,
+					   policy->min_freq_req, FREQ_QOS_MIN,
+					   policy->min);
 		if (ret < 0) {
 			/*
-			 * So we don't call dev_pm_qos_remove_request() for an
+			 * So we don't call freq_qos_remove_request() for an
 			 * uninitialized request.
 			 */
 			kfree(policy->min_freq_req);
 			policy->min_freq_req = NULL;
-
-			dev_err(dev, "Failed to add min-freq constraint (%d)\n",
-				ret);
 			goto out_destroy_policy;
 		}
 
 		/*
 		 * This must be initialized right here to avoid calling
-		 * dev_pm_qos_remove_request() on uninitialized request in case
+		 * freq_qos_remove_request() on uninitialized request in case
 		 * of errors.
 		 */
 		policy->max_freq_req = policy->min_freq_req + 1;
 
-		ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
-					     DEV_PM_QOS_MAX_FREQUENCY,
-					     policy->max);
+		ret = freq_qos_add_request(&policy->constraints,
+					   policy->max_freq_req, FREQ_QOS_MAX,
+					   policy->max);
 		if (ret < 0) {
 			policy->max_freq_req = NULL;
-			dev_err(dev, "Failed to add max-freq constraint (%d)\n",
-				ret);
 			goto out_destroy_policy;
 		}
+
+		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				CPUFREQ_CREATE_POLICY, policy);
 	}
 
 	if (cpufreq_driver->get && has_target()) {
@@ -1807,8 +1818,8 @@ void cpufreq_suspend(void)
 		}
 
 		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
-			pr_err("%s: Failed to suspend driver: %p\n", __func__,
-			       policy);
+			pr_err("%s: Failed to suspend driver: %s\n", __func__,
+			       cpufreq_driver->name);
 	}
 
 suspend:
@@ -2140,7 +2151,7 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 			  unsigned int target_freq,
 			  unsigned int relation)
 {
-	int ret = -EINVAL;
+	int ret;
 
 	down_write(&policy->rwsem);
 
@@ -2345,66 +2356,49 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 /**
  * cpufreq_set_policy - Modify cpufreq policy parameters.
  * @policy: Policy object to modify.
- * @new_policy: New policy data.
+ * @new_gov: Policy governor pointer.
+ * @new_pol: Policy value (for drivers with built-in governors).
 *
- * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
- * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
- * the driver's ->verify() callback again and run the notifiers for it again
- * with the CPUFREQ_NOTIFY value.  Next, copy the min and max parameters
- * of @new_policy to @policy and either invoke the driver's ->setpolicy()
- * callback (if present) or carry out a governor update for @policy.  That is,
- * run the current governor's ->limits() callback (if the governor field in
- * @new_policy points to the same object as the one in @policy) or replace the
- * governor for @policy with the new one stored in @new_policy.
+ * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
+ * limits to be set for the policy, update @policy with the verified limits
+ * values and either invoke the driver's ->setpolicy() callback (if present) or
+ * carry out a governor update for @policy.  That is, run the current governor's
+ * ->limits() callback (if @new_gov points to the same object as the one in
+ * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
-int cpufreq_set_policy(struct cpufreq_policy *policy,
-		       struct cpufreq_policy *new_policy)
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+			      struct cpufreq_governor *new_gov,
+			      unsigned int new_pol)
 {
+	struct cpufreq_policy_data new_data;
 	struct cpufreq_governor *old_gov;
-	struct device *cpu_dev = get_cpu_device(policy->cpu);
 	int ret;
 
-	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
-		 new_policy->cpu, new_policy->min, new_policy->max);
-
-	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
-
+	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
+	new_data.freq_table = policy->freq_table;
+	new_data.cpu = policy->cpu;
 	/*
 	 * PM QoS framework collects all the requests from users and provide us
 	 * the final aggregated value here.
 	 */
-	new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
-	new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
 
-	/* verify the cpu speed can be set within this limit */
-	ret = cpufreq_driver->verify(new_policy);
-	if (ret)
-		return ret;
-
-	/*
-	 * The notifier-chain shall be removed once all the users of
-	 * CPUFREQ_ADJUST are moved to use the QoS framework.
-	 */
-	/* adjust if necessary - all reasons */
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_ADJUST, new_policy);
+	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+		 new_data.cpu, new_data.min, new_data.max);
 
 	/*
-	 * verify the cpu speed can be set within this limit, which might be
-	 * different to the first one
+	 * Verify that the CPU speed can be set within these limits and make sure
+	 * that min <= max.
 	 */
-	ret = cpufreq_driver->verify(new_policy);
+	ret = cpufreq_driver->verify(&new_data);
 	if (ret)
 		return ret;
 
-	/* notification of the new policy */
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_NOTIFY, new_policy);
-
-	policy->min = new_policy->min;
-	policy->max = new_policy->max;
+	policy->min = new_data.min;
+	policy->max = new_data.max;
 	trace_cpu_frequency_limits(policy);
 
 	policy->cached_target_freq = UINT_MAX;
@@ -2413,12 +2407,12 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
 		 policy->min, policy->max);
 
 	if (cpufreq_driver->setpolicy) {
-		policy->policy = new_policy->policy;
+		policy->policy = new_pol;
 		pr_debug("setting range\n");
 		return cpufreq_driver->setpolicy(policy);
 	}
 
-	if (new_policy->governor == policy->governor) {
+	if (new_gov == policy->governor) {
 		pr_debug("governor limits update\n");
 		cpufreq_governor_limits(policy);
 		return 0;
@@ -2435,7 +2429,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
 	}
 
 	/* start new governor */
-	policy->governor = new_policy->governor;
+	policy->governor = new_gov;
 	ret = cpufreq_init_governor(policy);
 	if (!ret) {
 		ret = cpufreq_start_governor(policy);
@@ -2527,7 +2521,7 @@ static int cpufreq_boost_set_sw(int state)
 			break;
 		}
 
-		ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+		ret = freq_qos_update_request(policy->max_freq_req, policy->max);
 		if (ret < 0)
 			break;
 	}
@@ -2641,6 +2635,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	if (cpufreq_disabled())
 		return -ENODEV;
 
+	/*
+	 * The cpufreq core depends heavily on the availability of device
+	 * structure, make sure they are available before proceeding further.
+	 */
+	if (!get_cpu_device(0))
+		return -EPROBE_DEFER;
+
 	if (!driver_data || !driver_data->verify || !driver_data->init ||
 	    !(driver_data->setpolicy || driver_data->target_index ||
 		    driver_data->target) ||
@@ -2746,17 +2747,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
-	.shutdown = cpufreq_suspend,
-};
-
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
 static int __init cpufreq_core_init(void)
 {
 	if (cpufreq_disabled())
@@ -2765,8 +2755,6 @@ static int __init cpufreq_core_init(void)
 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 
-	register_syscore_ops(&cpufreq_syscore_ops);
-
 	return 0;
 }
 module_param(off, int, 0444);
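The cpufreq.c changes above replace the per-device PM QoS calls with the freq_qos API, aggregated on policy->constraints. A minimal sketch of how an external consumer (a thermal driver, say) could cap a policy under the new interface; the helper name and error handling are illustrative, not taken from this series:

#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

/* Illustrative only: cap a CPU's policy at max_khz via a freq_qos request. */
static struct freq_qos_request *example_add_max_cap(unsigned int cpu,
						    s32 max_khz)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct freq_qos_request *req;
	int ret;

	if (!policy)
		return ERR_PTR(-ENODEV);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		cpufreq_cpu_put(policy);
		return ERR_PTR(-ENOMEM);
	}

	/* Requests are aggregated; the effective max is the lowest one. */
	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
				   max_khz);
	cpufreq_cpu_put(policy);
	if (ret < 0) {
		kfree(req);
		return ERR_PTR(ret);
	}

	return req;	/* later: freq_qos_update_request() / freq_qos_remove_request() */
}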
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b66e81c06a57..737ff3b9c2c0 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -346,7 +346,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 	return CPU_FREQ_GOV_CONSERVATIVE;
 }
 
-fs_initcall(cpufreq_gov_dbs_init);
+core_initcall(cpufreq_gov_dbs_init);
 #else
 module_init(cpufreq_gov_dbs_init);
 #endif
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 4bb054d0cb43..f99ae45efaea 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -105,7 +105,7 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
 			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
 								  dbs_data->io_is_busy);
 			if (dbs_data->ignore_nice_load)
-				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+				j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
 		}
 	}
 }
@@ -149,7 +149,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 		j_cdbs->prev_cpu_idle = cur_idle_time;
 
 		if (ignore_nice) {
-			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+			u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
 
 			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
 			j_cdbs->prev_cpu_nice = cur_nice;
@@ -530,7 +530,7 @@ int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
 		j_cdbs->prev_load = 0;
 
 		if (ignore_nice)
-			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+			j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
 	}
 
 	gov->start(policy);
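The governor hunks above stop reading kcpustat_cpu(j).cpustat[CPUTIME_NICE] directly and use kcpustat_field() instead, which returns a consistent snapshot of a single cpustat field even for nohz_full CPUs whose vtime accounting is updated lazily. A minimal sketch of the same read (helper name illustrative):

#include <linux/kernel_stat.h>
#include <linux/math64.h>

/* Illustrative only: sample per-CPU nice time the way the governors now do. */
static u64 example_nice_time_us(int cpu)
{
	u64 nice = kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_NICE, cpu);

	return div_u64(nice, NSEC_PER_USEC);	/* cpustat values are in ns */
}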
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index dced033875bf..82a4d37ddecb 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -483,7 +483,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 	return CPU_FREQ_GOV_ONDEMAND;
 }
 
-fs_initcall(cpufreq_gov_dbs_init);
+core_initcall(cpufreq_gov_dbs_init);
 #else
 module_init(cpufreq_gov_dbs_init);
 #endif
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index aaa04dfcacd9..def9afe0f5b8 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -50,5 +50,5 @@ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
 MODULE_LICENSE("GPL");
 
-fs_initcall(cpufreq_gov_performance_init);
+core_initcall(cpufreq_gov_performance_init);
 module_exit(cpufreq_gov_performance_exit);
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index c143dc237d87..1ae66019eb83 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -43,7 +43,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 	return &cpufreq_gov_powersave;
 }
 
-fs_initcall(cpufreq_gov_powersave_init);
+core_initcall(cpufreq_gov_powersave_init);
 #else
 module_init(cpufreq_gov_powersave_init);
 #endif
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cbd81c58cb8f..b43e7cd502c5 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -147,7 +147,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
 	return &cpufreq_gov_userspace;
 }
 
-fs_initcall(cpufreq_gov_userspace_init);
+core_initcall(cpufreq_gov_userspace_init);
 #else
 module_init(cpufreq_gov_userspace_init);
 #endif
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index ded427e0a488..e117b0059123 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -60,7 +60,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 	return 0;
 }
 
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
 				   struct cpufreq_frequency_table *table)
 {
 	struct cpufreq_frequency_table *pos;
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
  * Generic routine to verify policy & frequency table, requires driver to set
  * policy->freq_table prior to it.
  */
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
 {
 	if (!policy->freq_table)
 		return -ENODEV;
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index e97b5733aa24..75b3ef7ec679 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -328,7 +328,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
  *      for the hardware supported by the driver.
  */
 
-static int cpufreq_gx_verify(struct cpufreq_policy *policy)
+static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
 {
 	unsigned int tmp_freq = 0;
 	u8 tmp1, tmp2;
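Several drivers in this series (cppc, nforce2, freq_table, gx-suspmod, intel_pstate below) convert their ->verify() callbacks to take struct cpufreq_policy_data instead of struct cpufreq_policy. Drivers with a frequency table can use the generic cpufreq_generic_frequency_table_verify() shown above; for a table-less driver, the minimal shape after the conversion is just a clamp, sketched here (function name illustrative):

#include <linux/cpufreq.h>

/*
 * Illustrative only: the minimal ->verify() shape after the
 * cpufreq_policy_data conversion shown above.
 */
static int example_verify(struct cpufreq_policy_data *policy)
{
	/* Clamp requested min/max into cpuinfo limits, keeping min <= max. */
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}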
-1867,22 +1913,22 @@ static const struct pstate_funcs knl_funcs = { (unsigned long)&policy } static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), + ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs), ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), - ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), + ICPU(INTEL_FAM6_HASWELL, core_funcs), + ICPU(INTEL_FAM6_BROADWELL, core_funcs), ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), ICPU(INTEL_FAM6_HASWELL_X, core_funcs), - ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs), - ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs), + ICPU(INTEL_FAM6_HASWELL_L, core_funcs), + ICPU(INTEL_FAM6_HASWELL_G, core_funcs), + ICPU(INTEL_FAM6_BROADWELL_G, core_funcs), ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), - ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs), ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE, core_funcs), + ICPU(INTEL_FAM6_BROADWELL_D, core_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), @@ -1893,20 +1939,20 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { - ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), + ICPU(INTEL_FAM6_BROADWELL_D, core_funcs), ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), {} }; static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { - ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs), + ICPU(INTEL_FAM6_KABYLAKE, core_funcs), {} }; static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE, core_funcs), {} }; @@ -1990,8 +2036,9 @@ static int intel_pstate_get_max_freq(struct cpudata *cpu) cpu->pstate.max_freq : cpu->pstate.turbo_freq; } -static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, - struct cpudata *cpu) +static void intel_pstate_update_perf_limits(struct cpudata *cpu, + unsigned int policy_min, + unsigned int policy_max) { int max_freq = intel_pstate_get_max_freq(cpu); int32_t max_policy_perf, min_policy_perf; @@ -2010,18 +2057,17 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, turbo_max = cpu->pstate.turbo_pstate; } - max_policy_perf = max_state * policy->max / max_freq; - if (policy->max == policy->min) { + max_policy_perf = max_state * policy_max / max_freq; + if (policy_max == policy_min) { min_policy_perf = max_policy_perf; } else { - min_policy_perf = max_state * policy->min / max_freq; + min_policy_perf = max_state * policy_min / max_freq; min_policy_perf = clamp_t(int32_t, min_policy_perf, 0, max_policy_perf); } pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n", - policy->cpu, max_state, - min_policy_perf, max_policy_perf); + cpu->cpu, max_state, min_policy_perf, max_policy_perf); /* Normalize user input to [min_perf, max_perf] */ if (per_cpu_limits) { @@ -2035,7 +2081,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); 
global_min = clamp_t(int32_t, global_min, 0, global_max); - pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu, + pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, global_min, global_max); cpu->min_perf_ratio = max(min_policy_perf, global_min); @@ -2048,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, cpu->max_perf_ratio); } - pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu, + pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, cpu->max_perf_ratio, cpu->min_perf_ratio); } @@ -2068,7 +2114,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) mutex_lock(&intel_pstate_limits_lock); - intel_pstate_update_perf_limits(policy, cpu); + intel_pstate_update_perf_limits(cpu, policy->min, policy->max); if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { /* @@ -2097,8 +2143,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) return 0; } -static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, - struct cpudata *cpu) +static void intel_pstate_adjust_policy_max(struct cpudata *cpu, + struct cpufreq_policy_data *policy) { if (!hwp_active && cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && @@ -2109,7 +2155,7 @@ static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, } } -static int intel_pstate_verify_policy(struct cpufreq_policy *policy) +static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; @@ -2117,11 +2163,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, intel_pstate_get_max_freq(cpu)); - if (policy->policy != CPUFREQ_POLICY_POWERSAVE && - policy->policy != CPUFREQ_POLICY_PERFORMANCE) - return -EINVAL; - - intel_pstate_adjust_policy_max(policy, cpu); + intel_pstate_adjust_policy_max(cpu, policy); return 0; } @@ -2222,7 +2264,7 @@ static struct cpufreq_driver intel_pstate = { .name = "intel_pstate", }; -static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) +static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) { struct cpudata *cpu = all_cpu_data[policy->cpu]; @@ -2230,9 +2272,9 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, intel_pstate_get_max_freq(cpu)); - intel_pstate_adjust_policy_max(policy, cpu); + intel_pstate_adjust_policy_max(cpu, policy); - intel_pstate_update_perf_limits(policy, cpu); + intel_pstate_update_perf_limits(cpu, policy->min, policy->max); return 0; } @@ -2332,8 +2374,16 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) { - int ret = __intel_pstate_cpu_init(policy); + int max_state, turbo_max, min_freq, max_freq, ret; + struct freq_qos_request *req; + struct cpudata *cpu; + struct device *dev; + dev = get_cpu_device(policy->cpu); + if (!dev) + return -ENODEV; + + ret = __intel_pstate_cpu_init(policy); if (ret) return ret; @@ -2342,7 +2392,63 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) /* This reflects the intel_pstate_get_cpu_pstates() setting. 
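A recurring conversion in this series, visible in the two verify() hunks above: these callbacks now receive struct cpufreq_policy_data, which exposes only the frequency limits and cpuinfo, not the governor policy; the POWERSAVE/PERFORMANCE checks therefore move out of the individual drivers. The resulting minimal shape of a verify callback, sketched here with a hypothetical foo_verify():

	static int foo_verify(struct cpufreq_policy_data *policy)
	{
		/* Clamp policy->min/max to cpuinfo; nothing else is
		 * visible to, or needed by, the driver at this point. */
		cpufreq_verify_within_cpu_limits(policy);
		return 0;
	}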
*/ policy->cur = policy->cpuinfo.min_freq; + req = kcalloc(2, sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto pstate_exit; + } + + cpu = all_cpu_data[policy->cpu]; + + if (hwp_active) + intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state); + else + turbo_max = cpu->pstate.turbo_pstate; + + min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); + min_freq *= cpu->pstate.scaling; + max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); + max_freq *= cpu->pstate.scaling; + + ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, + min_freq); + if (ret < 0) { + dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); + goto free_req; + } + + ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, + max_freq); + if (ret < 0) { + dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); + goto remove_min_req; + } + + policy->driver_data = req; + return 0; + +remove_min_req: + freq_qos_remove_request(req); +free_req: + kfree(req); +pstate_exit: + intel_pstate_exit_perf_limits(policy); + + return ret; +} + +static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + struct freq_qos_request *req; + + req = policy->driver_data; + + freq_qos_remove_request(req + 1); + freq_qos_remove_request(req); + kfree(req); + + return intel_pstate_cpu_exit(policy); } static struct cpufreq_driver intel_cpufreq = { @@ -2351,7 +2457,7 @@ static struct cpufreq_driver intel_cpufreq = { .target = intel_cpufreq_target, .fast_switch = intel_cpufreq_fast_switch, .init = intel_cpufreq_cpu_init, - .exit = intel_pstate_cpu_exit, + .exit = intel_cpufreq_cpu_exit, .stop_cpu = intel_cpufreq_stop_cpu, .update_limits = intel_pstate_update_limits, .name = "intel_cpufreq", @@ -2552,21 +2658,21 @@ enum { /* Hardware vendor-specific info that has its own power management modes */ static struct acpi_platform_list plat_info[] __initdata = { - {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS}, - {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, - {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, + {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, + {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, 
PPC}, + {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, + {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, { } /* End */ }; @@ -2624,7 +2730,7 @@ static inline void intel_pstate_request_control_from_smm(void) {} static const struct x86_cpu_id hwp_support_ids[] __initconst = { ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), - ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(INTEL_FAM6_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), ICPU_HWP(X86_MODEL_ANY, 0), {} }; diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index cb74bdc5baaa..70ad8fe1d78b 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = { static int kirkwood_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - struct resource *res; int err; priv.dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv.base = devm_ioremap_resource(&pdev->dev, res); + priv.base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv.base)) return PTR_ERR(priv.base); diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 64b8689f7a4a..0b08be8bff76 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c @@ -122,7 +122,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy) * Validates a new CPUFreq policy. This function has to be called with * cpufreq_driver locked. */ -static int longrun_verify_policy(struct cpufreq_policy *policy) +static int longrun_verify_policy(struct cpufreq_policy_data *policy) { if (!policy) return -EINVAL; @@ -130,10 +130,6 @@ static int longrun_verify_policy(struct cpufreq_policy *policy) policy->cpu = 0; cpufreq_verify_within_cpu_limits(policy); - if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && - (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) - return -EINVAL; - return 0; } diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 890813e0bb76..909f40fbcde2 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -23,7 +23,7 @@ #include <asm/clock.h> #include <asm/idle.h> -#include <asm/mach-loongson64/loongson.h> +#include <asm/mach-loongson2ef/loongson.h> static uint nowait; @@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void) u32 cpu_freq; spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG(0); - LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */ + cpu_freq = readl(LOONGSON_CHIPCFG); + /* Put CPU into wait mode */ + writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG); + /* Restore CPU state */ + writel(cpu_freq, LOONGSON_CHIPCFG); spin_unlock_irqrestore(&loongson2_wait_lock, flags); local_irq_enable(); } diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index f14f3a85f2f7..0c98dd08273d 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -338,7 +338,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) goto out_free_resources; } - proc_reg = regulator_get_exclusive(cpu_dev, "proc"); + proc_reg = regulator_get_optional(cpu_dev, "proc"); if 
(IS_ERR(proc_reg)) { if (PTR_ERR(proc_reg) == -EPROBE_DEFER) pr_warn("proc regulator for cpu%d not ready, retry.\n", @@ -535,6 +535,8 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { .compatible = "mediatek,mt817x", }, { .compatible = "mediatek,mt8173", }, { .compatible = "mediatek,mt8176", }, + { .compatible = "mediatek,mt8183", }, + { .compatible = "mediatek,mt8516", }, { } }; diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index fdc767fdbe6a..5789fe7a94bd 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -109,7 +109,7 @@ struct pcc_cpu { static struct pcc_cpu __percpu *pcc_cpu_info; -static int pcc_cpufreq_verify(struct cpufreq_policy *policy) +static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy) { cpufreq_verify_within_cpu_limits(policy); return 0; @@ -445,7 +445,7 @@ static int __init pcc_cpufreq_probe(void) goto out_free; } - pcch_virt_addr = ioremap_nocache(mem_resource->minimum, + pcch_virt_addr = ioremap(mem_resource->minimum, mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 6061850e59c9..56f4bc0d209e 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -1041,9 +1041,14 @@ static struct cpufreq_driver powernv_cpufreq_driver = { static int init_chip_info(void) { - unsigned int chip[256]; + unsigned int *chip; unsigned int cpu, i; unsigned int prev_chip_id = UINT_MAX; + int ret = 0; + + chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; for_each_possible_cpu(cpu) { unsigned int id = cpu_to_chip_id(cpu); @@ -1055,8 +1060,10 @@ static int init_chip_info(void) } chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL); - if (!chips) - return -ENOMEM; + if (!chips) { + ret = -ENOMEM; + goto free_and_return; + } for (i = 0; i < nr_chips; i++) { chips[i].id = chip[i]; @@ -1066,7 +1073,9 @@ static int init_chip_info(void) per_cpu(chip_info, cpu) = &chips[i]; } - return 0; +free_and_return: + kfree(chip); + return ret; } static inline void clean_chip_info(void) diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index b83f36febf03..c58abb4cca3a 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -110,6 +110,13 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) #endif policy->freq_table = cbe_freqs; + cbe_cpufreq_pmi_policy_init(policy); + return 0; +} + +static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + cbe_cpufreq_pmi_policy_exit(policy); return 0; } @@ -129,6 +136,7 @@ static struct cpufreq_driver cbe_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = cbe_cpufreq_target, .init = cbe_cpufreq_cpu_init, + .exit = cbe_cpufreq_cpu_exit, .name = "cbe-cpufreq", .flags = CPUFREQ_CONST_LOOPS, }; @@ -139,15 +147,24 @@ static struct cpufreq_driver cbe_cpufreq_driver = { static int __init cbe_cpufreq_init(void) { + int ret; + if (!machine_is(cell)) return -ENODEV; - return cpufreq_register_driver(&cbe_cpufreq_driver); + cbe_cpufreq_pmi_init(); + + ret = cpufreq_register_driver(&cbe_cpufreq_driver); + if (ret) + cbe_cpufreq_pmi_exit(); + + return ret; } static void __exit cbe_cpufreq_exit(void) { cpufreq_unregister_driver(&cbe_cpufreq_driver); + cbe_cpufreq_pmi_exit(); } module_init(cbe_cpufreq_init); diff --git 
a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h index 9d973519d669..00cd8633b0d9 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.h +++ b/drivers/cpufreq/ppc_cbe_cpufreq.h @@ -20,6 +20,14 @@ int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); #if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI) extern bool cbe_cpufreq_has_pmi; +void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy); +void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy); +void cbe_cpufreq_pmi_init(void); +void cbe_cpufreq_pmi_exit(void); #else #define cbe_cpufreq_has_pmi (0) +static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {} +static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {} +static inline void cbe_cpufreq_pmi_init(void) {} +static inline void cbe_cpufreq_pmi_exit(void) {} #endif diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c index 97c8ee4614b7..037fe23bc6ed 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c @@ -12,6 +12,7 @@ #include <linux/timer.h> #include <linux/init.h> #include <linux/of_platform.h> +#include <linux/pm_qos.h> #include <asm/processor.h> #include <asm/prom.h> @@ -24,8 +25,6 @@ #include "ppc_cbe_cpufreq.h" -static u8 pmi_slow_mode_limit[MAX_CBE]; - bool cbe_cpufreq_has_pmi = false; EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); @@ -65,64 +64,88 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) { + struct cpufreq_policy *policy; + struct freq_qos_request *req; u8 node, slow_mode; + int cpu, ret; BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); node = pmi_msg.data1; slow_mode = pmi_msg.data2; - pmi_slow_mode_limit[node] = slow_mode; + cpu = cbe_node_to_cpu(node); pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); -} - -static int pmi_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - struct cpufreq_frequency_table *cbe_freqs = policy->freq_table; - u8 node; - - /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY - * policy events?) 
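The rework in ppc_cbe_cpufreq_pmi.c replaces exactly the notifier being deleted here: instead of clamping limits inside a CPUFREQ_POLICY_NOTIFIER callback, the PMI handler now updates a PM QoS frequency constraint that the cpufreq core aggregates with all other constraints on the policy. The request lifecycle, condensed and with error handling omitted:

	struct freq_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

	/* policy init: start unconstrained, at the table maximum */
	freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
			     policy->freq_table[0].frequency);
	/* PMI event: tighten (or relax) the cap */
	freq_qos_update_request(req, policy->freq_table[slow_mode].frequency);
	/* policy exit */
	freq_qos_remove_request(req);
	kfree(req);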
- */ - node = cbe_cpu_to_node(policy->cpu); - - pr_debug("got notified, event=%lu, node=%u\n", event, node); - if (pmi_slow_mode_limit[node] != 0) { - pr_debug("limiting node %d to slow mode %d\n", - node, pmi_slow_mode_limit[node]); + policy = cpufreq_cpu_get(cpu); + if (!policy) { + pr_warn("cpufreq policy not found cpu%d\n", cpu); + return; + } - cpufreq_verify_within_limits(policy, 0, + req = policy->driver_data; - cbe_freqs[pmi_slow_mode_limit[node]].frequency); - } + ret = freq_qos_update_request(req, + policy->freq_table[slow_mode].frequency); + if (ret < 0) + pr_warn("Failed to update freq constraint: %d\n", ret); + else + pr_debug("limiting node %d to slow mode %d\n", node, slow_mode); - return 0; + cpufreq_cpu_put(policy); } -static struct notifier_block pmi_notifier_block = { - .notifier_call = pmi_notifier, -}; - static struct pmi_handler cbe_pmi_handler = { .type = PMI_TYPE_FREQ_CHANGE, .handle_pmi_message = cbe_cpufreq_handle_pmi, }; +void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) +{ + struct freq_qos_request *req; + int ret; + + if (!cbe_cpufreq_has_pmi) + return; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return; + + ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX, + policy->freq_table[0].frequency); + if (ret < 0) { + pr_err("Failed to add freq constraint (%d)\n", ret); + kfree(req); + return; + } + policy->driver_data = req; +} +EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init); -static int __init cbe_cpufreq_pmi_init(void) +void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) { - cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; + struct freq_qos_request *req = policy->driver_data; - if (!cbe_cpufreq_has_pmi) - return -ENODEV; + if (cbe_cpufreq_has_pmi) { + freq_qos_remove_request(req); + kfree(req); + } +} +EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit); - cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); +void cbe_cpufreq_pmi_init(void) +{ + if (!pmi_register_handler(&cbe_pmi_handler)) + cbe_cpufreq_has_pmi = true; +} +EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init); - return 0; +void cbe_cpufreq_pmi_exit(void) +{ + pmi_unregister_handler(&cbe_pmi_handler); + cbe_cpufreq_has_pmi = false; } -device_initcall(cbe_cpufreq_pmi_init); +EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit); diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 4b0b50403901..fc92a8842e25 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -20,6 +20,7 @@ #define LUT_VOLT GENMASK(11, 0) #define LUT_ROW_SIZE 32 #define CLK_HW_DIV 2 +#define LUT_TURBO_IND 1 /* Register offsets */ #define REG_ENABLE 0x0 @@ -34,9 +35,12 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy, unsigned int index) { void __iomem *perf_state_reg = policy->driver_data; + unsigned long freq = policy->freq_table[index].frequency; writel_relaxed(index, perf_state_reg); + arch_set_freq_scale(policy->related_cpus, freq, + policy->cpuinfo.max_freq); return 0; } @@ -63,6 +67,7 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, { void __iomem *perf_state_reg = policy->driver_data; int index; + unsigned long freq; index = policy->cached_resolved_idx; if (index < 0) @@ -70,16 +75,19 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, writel_relaxed(index, perf_state_reg); - return policy->freq_table[index].frequency; + freq = policy->freq_table[index].frequency; + arch_set_freq_scale(policy->related_cpus, 
freq, + policy->cpuinfo.max_freq); + + return freq; } static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, struct cpufreq_policy *policy, void __iomem *base) { - u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq; + u32 data, src, lval, i, core_count, prev_freq = 0, freq; u32 volt; - unsigned int max_cores = cpumask_weight(policy->cpus); struct cpufreq_frequency_table *table; table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL); @@ -102,12 +110,12 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, else freq = cpu_hw_rate / 1000; - if (freq != prev_freq && core_count == max_cores) { + if (freq != prev_freq && core_count != LUT_TURBO_IND) { table[i].frequency = freq; dev_pm_opp_add(cpu_dev, freq * 1000, volt); dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i, freq, core_count); - } else { + } else if (core_count == LUT_TURBO_IND) { table[i].frequency = CPUFREQ_ENTRY_INVALID; } @@ -115,14 +123,14 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, * Two of the same frequencies with the same core counts means * end of table */ - if (i > 0 && prev_freq == freq && prev_cc == core_count) { + if (i > 0 && prev_freq == freq) { struct cpufreq_frequency_table *prev = &table[i - 1]; /* * Only treat the last frequency that might be a boost * as the boost frequency */ - if (prev_cc != max_cores) { + if (prev->frequency == CPUFREQ_ENTRY_INVALID) { prev->frequency = prev_freq; prev->flags = CPUFREQ_BOOST_FREQ; dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt); @@ -131,7 +139,6 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, break; } - prev_cc = core_count; prev_freq = freq; } @@ -327,7 +334,7 @@ static int __init qcom_cpufreq_hw_init(void) { return platform_driver_register(&qcom_cpufreq_hw_driver); } -device_initcall(qcom_cpufreq_hw_init); +postcore_initcall(qcom_cpufreq_hw_init); static void __exit qcom_cpufreq_hw_exit(void) { diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c deleted file mode 100644 index dd64dcf89c74..000000000000 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ /dev/null @@ -1,249 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018, The Linux Foundation. All rights reserved. - */ - -/* - * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors, - * the CPU frequency subset and voltage value of each OPP varies - * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables - * defines the voltage and frequency value based on the msm-id in SMEM - * and speedbin blown in the efuse combination. - * The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC - * to provide the OPP framework with required information. - * This is used to determine the voltage and frequency value for each OPP of - * operating-points-v2 table when it is parsed by the OPP framework. 
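On the arch_set_freq_scale() calls added to qcom-cpufreq-hw above: firmware-managed fast switching bypasses the notification path the scheduler would otherwise observe, so the driver reports each frequency change itself to keep frequency-invariant utilization tracking accurate. Roughly what the arm/arm64 implementation records; this is an illustration and the arch_topology code is authoritative:

	/* scale ~= cur/max in SCHED_CAPACITY fixed point, read by PELT */
	unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
	int i;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;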
- */ - -#include <linux/cpu.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/nvmem-consumer.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <linux/pm_opp.h> -#include <linux/slab.h> -#include <linux/soc/qcom/smem.h> - -#define MSM_ID_SMEM 137 - -enum _msm_id { - MSM8996V3 = 0xF6ul, - APQ8096V3 = 0x123ul, - MSM8996SG = 0x131ul, - APQ8096SG = 0x138ul, -}; - -enum _msm8996_version { - MSM8996_V3, - MSM8996_SG, - NUM_OF_MSM8996_VERSIONS, -}; - -static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; - -static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) -{ - size_t len; - u32 *msm_id; - enum _msm8996_version version; - - msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len); - if (IS_ERR(msm_id)) - return NUM_OF_MSM8996_VERSIONS; - - /* The first 4 bytes are format, next to them is the actual msm-id */ - msm_id++; - - switch ((enum _msm_id)*msm_id) { - case MSM8996V3: - case APQ8096V3: - version = MSM8996_V3; - break; - case MSM8996SG: - case APQ8096SG: - version = MSM8996_SG; - break; - default: - version = NUM_OF_MSM8996_VERSIONS; - } - - return version; -} - -static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) -{ - struct opp_table **opp_tables; - enum _msm8996_version msm8996_version; - struct nvmem_cell *speedbin_nvmem; - struct device_node *np; - struct device *cpu_dev; - unsigned cpu; - u8 *speedbin; - u32 versions; - size_t len; - int ret; - - cpu_dev = get_cpu_device(0); - if (!cpu_dev) - return -ENODEV; - - msm8996_version = qcom_cpufreq_kryo_get_msm_id(); - if (NUM_OF_MSM8996_VERSIONS == msm8996_version) { - dev_err(cpu_dev, "Not Snapdragon 820/821!"); - return -ENODEV; - } - - np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); - if (!np) - return -ENOENT; - - ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); - if (!ret) { - of_node_put(np); - return -ENOENT; - } - - speedbin_nvmem = of_nvmem_cell_get(np, NULL); - of_node_put(np); - if (IS_ERR(speedbin_nvmem)) { - if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER) - dev_err(cpu_dev, "Could not get nvmem cell: %ld\n", - PTR_ERR(speedbin_nvmem)); - return PTR_ERR(speedbin_nvmem); - } - - speedbin = nvmem_cell_read(speedbin_nvmem, &len); - nvmem_cell_put(speedbin_nvmem); - if (IS_ERR(speedbin)) - return PTR_ERR(speedbin); - - switch (msm8996_version) { - case MSM8996_V3: - versions = 1 << (unsigned int)(*speedbin); - break; - case MSM8996_SG: - versions = 1 << ((unsigned int)(*speedbin) + 4); - break; - default: - BUG(); - break; - } - kfree(speedbin); - - opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL); - if (!opp_tables) - return -ENOMEM; - - for_each_possible_cpu(cpu) { - cpu_dev = get_cpu_device(cpu); - if (NULL == cpu_dev) { - ret = -ENODEV; - goto free_opp; - } - - opp_tables[cpu] = dev_pm_opp_set_supported_hw(cpu_dev, - &versions, 1); - if (IS_ERR(opp_tables[cpu])) { - ret = PTR_ERR(opp_tables[cpu]); - dev_err(cpu_dev, "Failed to set supported hardware\n"); - goto free_opp; - } - } - - cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, - NULL, 0); - if (!IS_ERR(cpufreq_dt_pdev)) { - platform_set_drvdata(pdev, opp_tables); - return 0; - } - - ret = PTR_ERR(cpufreq_dt_pdev); - dev_err(cpu_dev, "Failed to register platform device\n"); - -free_opp: - for_each_possible_cpu(cpu) { - if (IS_ERR_OR_NULL(opp_tables[cpu])) - break; - dev_pm_opp_put_supported_hw(opp_tables[cpu]); - } - kfree(opp_tables); - - return ret; -} - -static int 
qcom_cpufreq_kryo_remove(struct platform_device *pdev) -{ - struct opp_table **opp_tables = platform_get_drvdata(pdev); - unsigned int cpu; - - platform_device_unregister(cpufreq_dt_pdev); - - for_each_possible_cpu(cpu) - dev_pm_opp_put_supported_hw(opp_tables[cpu]); - - kfree(opp_tables); - - return 0; -} - -static struct platform_driver qcom_cpufreq_kryo_driver = { - .probe = qcom_cpufreq_kryo_probe, - .remove = qcom_cpufreq_kryo_remove, - .driver = { - .name = "qcom-cpufreq-kryo", - }, -}; - -static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = { - { .compatible = "qcom,apq8096", }, - { .compatible = "qcom,msm8996", }, - {} -}; - -/* - * Since the driver depends on smem and nvmem drivers, which may - * return EPROBE_DEFER, all the real activity is done in the probe, - * which may be defered as well. The init here is only registering - * the driver and the platform device. - */ -static int __init qcom_cpufreq_kryo_init(void) -{ - struct device_node *np = of_find_node_by_path("/"); - const struct of_device_id *match; - int ret; - - if (!np) - return -ENODEV; - - match = of_match_node(qcom_cpufreq_kryo_match_list, np); - of_node_put(np); - if (!match) - return -ENODEV; - - ret = platform_driver_register(&qcom_cpufreq_kryo_driver); - if (unlikely(ret < 0)) - return ret; - - kryo_cpufreq_pdev = platform_device_register_simple( - "qcom-cpufreq-kryo", -1, NULL, 0); - ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev); - if (0 == ret) - return 0; - - platform_driver_unregister(&qcom_cpufreq_kryo_driver); - return ret; -} -module_init(qcom_cpufreq_kryo_init); - -static void __exit qcom_cpufreq_kryo_exit(void) -{ - platform_device_unregister(kryo_cpufreq_pdev); - platform_driver_unregister(&qcom_cpufreq_kryo_driver); -} -module_exit(qcom_cpufreq_kryo_exit); - -MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c new file mode 100644 index 000000000000..f0d2d5035413 --- /dev/null +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +/* + * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors, + * the CPU frequency subset and voltage value of each OPP varies + * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables + * defines the voltage and frequency value based on the msm-id in SMEM + * and speedbin blown in the efuse combination. + * The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC + * to provide the OPP framework with required information. + * This is used to determine the voltage and frequency value for each OPP of + * operating-points-v2 table when it is parsed by the OPP framework. 
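For concreteness, the supported-hardware encoding that this driver, like the kryo driver it replaces, hands to the OPP core, using a hypothetical fuse value:

	/*
	 * MSM8996_SG example: *speedbin == 2 yields
	 * versions = BIT(2 + 4) = 0x40, so the OPP core keeps only the
	 * DT entries whose opp-supported-hw mask has bit 6 set.
	 */
	drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
	dev_pm_opp_set_supported_hw(cpu_dev, &drv->versions, 1);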
+ */ + +#include <linux/cpu.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/slab.h> +#include <linux/soc/qcom/smem.h> + +#define MSM_ID_SMEM 137 + +enum _msm_id { + MSM8996V3 = 0xF6ul, + APQ8096V3 = 0x123ul, + MSM8996SG = 0x131ul, + APQ8096SG = 0x138ul, +}; + +enum _msm8996_version { + MSM8996_V3, + MSM8996_SG, + NUM_OF_MSM8996_VERSIONS, +}; + +struct qcom_cpufreq_drv; + +struct qcom_cpufreq_match_data { + int (*get_version)(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + struct qcom_cpufreq_drv *drv); + const char **genpd_names; +}; + +struct qcom_cpufreq_drv { + struct opp_table **opp_tables; + struct opp_table **genpd_opp_tables; + u32 versions; + const struct qcom_cpufreq_match_data *data; +}; + +static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev; + +static enum _msm8996_version qcom_cpufreq_get_msm_id(void) +{ + size_t len; + u32 *msm_id; + enum _msm8996_version version; + + msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len); + if (IS_ERR(msm_id)) + return NUM_OF_MSM8996_VERSIONS; + + /* The first 4 bytes are format, next to them is the actual msm-id */ + msm_id++; + + switch ((enum _msm_id)*msm_id) { + case MSM8996V3: + case APQ8096V3: + version = MSM8996_V3; + break; + case MSM8996SG: + case APQ8096SG: + version = MSM8996_SG; + break; + default: + version = NUM_OF_MSM8996_VERSIONS; + } + + return version; +} + +static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev, + struct nvmem_cell *speedbin_nvmem, + struct qcom_cpufreq_drv *drv) +{ + size_t len; + u8 *speedbin; + enum _msm8996_version msm8996_version; + + msm8996_version = qcom_cpufreq_get_msm_id(); + if (NUM_OF_MSM8996_VERSIONS == msm8996_version) { + dev_err(cpu_dev, "Not Snapdragon 820/821!"); + return -ENODEV; + } + + speedbin = nvmem_cell_read(speedbin_nvmem, &len); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + switch (msm8996_version) { + case MSM8996_V3: + drv->versions = 1 << (unsigned int)(*speedbin); + break; + case MSM8996_SG: + drv->versions = 1 << ((unsigned int)(*speedbin) + 4); + break; + default: + BUG(); + break; + } + + kfree(speedbin); + return 0; +} + +static const struct qcom_cpufreq_match_data match_data_kryo = { + .get_version = qcom_cpufreq_kryo_name_version, +}; + +static const char *qcs404_genpd_names[] = { "cpr", NULL }; + +static const struct qcom_cpufreq_match_data match_data_qcs404 = { + .genpd_names = qcs404_genpd_names, +}; + +static int qcom_cpufreq_probe(struct platform_device *pdev) +{ + struct qcom_cpufreq_drv *drv; + struct nvmem_cell *speedbin_nvmem; + struct device_node *np; + struct device *cpu_dev; + unsigned cpu; + const struct of_device_id *match; + int ret; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) + return -ENODEV; + + np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!np) + return -ENOENT; + + ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); + if (!ret) { + of_node_put(np); + return -ENOENT; + } + + drv = kzalloc(sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + + match = pdev->dev.platform_data; + drv->data = match->data; + if (!drv->data) { + ret = -ENODEV; + goto free_drv; + } + + if (drv->data->get_version) { + speedbin_nvmem = of_nvmem_cell_get(np, NULL); + if (IS_ERR(speedbin_nvmem)) { + if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER) 
+ dev_err(cpu_dev, + "Could not get nvmem cell: %ld\n", + PTR_ERR(speedbin_nvmem)); + ret = PTR_ERR(speedbin_nvmem); + goto free_drv; + } + + ret = drv->data->get_version(cpu_dev, speedbin_nvmem, drv); + if (ret) { + nvmem_cell_put(speedbin_nvmem); + goto free_drv; + } + nvmem_cell_put(speedbin_nvmem); + } + of_node_put(np); + + drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables), + GFP_KERNEL); + if (!drv->opp_tables) { + ret = -ENOMEM; + goto free_drv; + } + + drv->genpd_opp_tables = kcalloc(num_possible_cpus(), + sizeof(*drv->genpd_opp_tables), + GFP_KERNEL); + if (!drv->genpd_opp_tables) { + ret = -ENOMEM; + goto free_opp; + } + + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (NULL == cpu_dev) { + ret = -ENODEV; + goto free_genpd_opp; + } + + if (drv->data->get_version) { + drv->opp_tables[cpu] = + dev_pm_opp_set_supported_hw(cpu_dev, + &drv->versions, 1); + if (IS_ERR(drv->opp_tables[cpu])) { + ret = PTR_ERR(drv->opp_tables[cpu]); + dev_err(cpu_dev, + "Failed to set supported hardware\n"); + goto free_genpd_opp; + } + } + + if (drv->data->genpd_names) { + drv->genpd_opp_tables[cpu] = + dev_pm_opp_attach_genpd(cpu_dev, + drv->data->genpd_names, + NULL); + if (IS_ERR(drv->genpd_opp_tables[cpu])) { + ret = PTR_ERR(drv->genpd_opp_tables[cpu]); + if (ret != -EPROBE_DEFER) + dev_err(cpu_dev, + "Could not attach to pm_domain: %d\n", + ret); + goto free_genpd_opp; + } + } + } + + cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, + NULL, 0); + if (!IS_ERR(cpufreq_dt_pdev)) { + platform_set_drvdata(pdev, drv); + return 0; + } + + ret = PTR_ERR(cpufreq_dt_pdev); + dev_err(cpu_dev, "Failed to register platform device\n"); + +free_genpd_opp: + for_each_possible_cpu(cpu) { + if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu])) + break; + dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]); + } + kfree(drv->genpd_opp_tables); +free_opp: + for_each_possible_cpu(cpu) { + if (IS_ERR_OR_NULL(drv->opp_tables[cpu])) + break; + dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]); + } + kfree(drv->opp_tables); +free_drv: + kfree(drv); + + return ret; +} + +static int qcom_cpufreq_remove(struct platform_device *pdev) +{ + struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev); + unsigned int cpu; + + platform_device_unregister(cpufreq_dt_pdev); + + for_each_possible_cpu(cpu) { + if (drv->opp_tables[cpu]) + dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]); + if (drv->genpd_opp_tables[cpu]) + dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]); + } + + kfree(drv->opp_tables); + kfree(drv->genpd_opp_tables); + kfree(drv); + + return 0; +} + +static struct platform_driver qcom_cpufreq_driver = { + .probe = qcom_cpufreq_probe, + .remove = qcom_cpufreq_remove, + .driver = { + .name = "qcom-cpufreq-nvmem", + }, +}; + +static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { + { .compatible = "qcom,apq8096", .data = &match_data_kryo }, + { .compatible = "qcom,msm8996", .data = &match_data_kryo }, + { .compatible = "qcom,qcs404", .data = &match_data_qcs404 }, + {}, +}; + +/* + * Since the driver depends on smem and nvmem drivers, which may + * return EPROBE_DEFER, all the real activity is done in the probe, + * which may be defered as well. The init here is only registering + * the driver and the platform device. 
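On the dev_pm_opp_attach_genpd() calls above: qcs404 scales its CPU supply through a CPR power domain rather than a plain regulator, so each CPU device is attached to the "cpr" domain through a virtual consumer device, and the OPP core then drives the domain's performance state on frequency changes. A trimmed sketch:

	static const char *genpd_names[] = { "cpr", NULL };

	/* one virtual consumer per CPU; errors unwound by the caller */
	opp_table = dev_pm_opp_attach_genpd(cpu_dev, genpd_names, NULL);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);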
+ */ +static int __init qcom_cpufreq_init(void) +{ + struct device_node *np = of_find_node_by_path("/"); + const struct of_device_id *match; + int ret; + + if (!np) + return -ENODEV; + + match = of_match_node(qcom_cpufreq_match_list, np); + of_node_put(np); + if (!match) + return -ENODEV; + + ret = platform_driver_register(&qcom_cpufreq_driver); + if (unlikely(ret < 0)) + return ret; + + cpufreq_pdev = platform_device_register_data(NULL, "qcom-cpufreq-nvmem", + -1, match, sizeof(*match)); + ret = PTR_ERR_OR_ZERO(cpufreq_pdev); + if (0 == ret) + return 0; + + platform_driver_unregister(&qcom_cpufreq_driver); + return ret; +} +module_init(qcom_cpufreq_init); + +static void __exit qcom_cpufreq_exit(void) +{ + platform_device_unregister(cpufreq_pdev); + platform_driver_unregister(&qcom_cpufreq_driver); +} +module_exit(qcom_cpufreq_exit); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. CPUfreq driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 106910351c41..5c221bc90210 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; int ret; + struct cpufreq_policy *policy; mutex_lock(&cpufreq_lock); @@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, */ if (s3c_freq->is_dvs) { pr_debug("cpufreq: leave dvs on reboot\n"); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0); + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: get no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0); + cpufreq_cpu_put(policy); + if (ret < 0) return NOTIFY_BAD; } diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index af0c00dabb22..c6bdfc308e99 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -19,7 +19,6 @@ static struct regulator *vddarm; static unsigned long regulator_latency; -#ifdef CONFIG_CPU_S3C6410 struct s3c64xx_dvfs { unsigned int vddarm_min; unsigned int vddarm_max; @@ -48,7 +47,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = { { 0, 4, 800000 }, { 0, 0, CPUFREQ_TABLE_END }, }; -#endif static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) @@ -149,11 +147,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) if (policy->cpu != 0) return -EINVAL; - if (s3c64xx_freq_table == NULL) { - pr_err("No frequency information for this CPU\n"); - return -ENODEV; - } - policy->clk = clk_get(NULL, "armclk"); if (IS_ERR(policy->clk)) { pr_err("Unable to obtain ARMCLK: %ld\n", diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 5d10030f2560..e84281e2561d 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr) { int ret; + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: get no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0); + cpufreq_cpu_put(policy); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); if (ret < 0) return NOTIFY_BAD; diff --git a/drivers/cpufreq/scmi-cpufreq.c 
b/drivers/cpufreq/scmi-cpufreq.c index e6182c89df79..61623e2ff149 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -261,7 +261,7 @@ static void scmi_cpufreq_remove(struct scmi_device *sdev) } static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_PERF }, + { SCMI_PROTOCOL_PERF, "cpufreq" }, { }, }; MODULE_DEVICE_TABLE(scmi, scmi_id_table); diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 2b51e0718c9f..20d1f85d5f5a 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -1,8 +1,6 @@ /* * System Control and Power Interface (SCPI) based CPUFreq Interface driver * - * It provides necessary ops to arm_big_little cpufreq driver. - * * Copyright (C) 2015 ARM Ltd. * Sudeep Holla <sudeep.holla@arm.com> * diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 5096c0ab781b..0ac265d47ef0 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -87,7 +87,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy, return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data); } -static int sh_cpufreq_verify(struct cpufreq_policy *policy) +static int sh_cpufreq_verify(struct cpufreq_policy_data *policy) { struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); struct cpufreq_frequency_table *freq_table; diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c new file mode 100644 index 000000000000..9907a165135b --- /dev/null +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Allwinner CPUFreq nvmem based driver + * + * The sun50i-cpufreq-nvmem driver reads the efuse value from the SoC to + * provide the OPP framework with required information. + * + * Copyright (C) 2019 Yangtao Li <tiny.windzz@gmail.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/slab.h> + +#define MAX_NAME_LEN 7 + +#define NVMEM_MASK 0x7 +#define NVMEM_SHIFT 5 + +static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev; + +/** + * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value + * @versions: Set to the value parsed from efuse + * + * Returns 0 if success. + */ +static int sun50i_cpufreq_get_efuse(u32 *versions) +{ + struct nvmem_cell *speedbin_nvmem; + struct device_node *np; + struct device *cpu_dev; + u32 *speedbin, efuse_value; + size_t len; + int ret; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) + return -ENODEV; + + np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!np) + return -ENOENT; + + ret = of_device_is_compatible(np, + "allwinner,sun50i-h6-operating-points"); + if (!ret) { + of_node_put(np); + return -ENOENT; + } + + speedbin_nvmem = of_nvmem_cell_get(np, NULL); + of_node_put(np); + if (IS_ERR(speedbin_nvmem)) { + if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER) + pr_err("Could not get nvmem cell: %ld\n", + PTR_ERR(speedbin_nvmem)); + return PTR_ERR(speedbin_nvmem); + } + + speedbin = nvmem_cell_read(speedbin_nvmem, &len); + nvmem_cell_put(speedbin_nvmem); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); + + efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK; + + /* + * We treat unexpected efuse values as if the SoC was from + * the slowest bin. Expected efuse values are 1-3, slowest + * to fastest. 
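A worked example of the extraction described above, with a hypothetical fuse word:

	/*
	 * raw fuse word 0x45 = 0b0100_0101:
	 * (0x45 >> NVMEM_SHIFT) & NVMEM_MASK = (0x45 >> 5) & 0x7 = 2,
	 * which falls in the expected 1-3 range, so the bin is 1 and
	 * the driver will select the "speed1" OPP properties.
	 */
	efuse_value = (0x45 >> NVMEM_SHIFT) & NVMEM_MASK;	/* 2 */
	*versions = efuse_value - 1;				/* 1 */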
+ */ + if (efuse_value >= 1 && efuse_value <= 3) + *versions = efuse_value - 1; + else + *versions = 0; + + kfree(speedbin); + return 0; +}; + +static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) +{ + struct opp_table **opp_tables; + char name[MAX_NAME_LEN]; + unsigned int cpu; + u32 speed = 0; + int ret; + + opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), + GFP_KERNEL); + if (!opp_tables) + return -ENOMEM; + + ret = sun50i_cpufreq_get_efuse(&speed); + if (ret) + return ret; + + snprintf(name, MAX_NAME_LEN, "speed%d", speed); + + for_each_possible_cpu(cpu) { + struct device *cpu_dev = get_cpu_device(cpu); + + if (!cpu_dev) { + ret = -ENODEV; + goto free_opp; + } + + opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name); + if (IS_ERR(opp_tables[cpu])) { + ret = PTR_ERR(opp_tables[cpu]); + pr_err("Failed to set prop name\n"); + goto free_opp; + } + } + + cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, + NULL, 0); + if (!IS_ERR(cpufreq_dt_pdev)) { + platform_set_drvdata(pdev, opp_tables); + return 0; + } + + ret = PTR_ERR(cpufreq_dt_pdev); + pr_err("Failed to register platform device\n"); + +free_opp: + for_each_possible_cpu(cpu) { + if (IS_ERR_OR_NULL(opp_tables[cpu])) + break; + dev_pm_opp_put_prop_name(opp_tables[cpu]); + } + kfree(opp_tables); + + return ret; +} + +static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev) +{ + struct opp_table **opp_tables = platform_get_drvdata(pdev); + unsigned int cpu; + + platform_device_unregister(cpufreq_dt_pdev); + + for_each_possible_cpu(cpu) + dev_pm_opp_put_prop_name(opp_tables[cpu]); + + kfree(opp_tables); + + return 0; +} + +static struct platform_driver sun50i_cpufreq_driver = { + .probe = sun50i_cpufreq_nvmem_probe, + .remove = sun50i_cpufreq_nvmem_remove, + .driver = { + .name = "sun50i-cpufreq-nvmem", + }, +}; + +static const struct of_device_id sun50i_cpufreq_match_list[] = { + { .compatible = "allwinner,sun50i-h6" }, + {} +}; + +static const struct of_device_id *sun50i_cpufreq_match_node(void) +{ + const struct of_device_id *match; + struct device_node *np; + + np = of_find_node_by_path("/"); + match = of_match_node(sun50i_cpufreq_match_list, np); + of_node_put(np); + + return match; +} + +/* + * Since the driver depends on nvmem drivers, which may return EPROBE_DEFER, + * all the real activity is done in the probe, which may be defered as well. + * The init here is only registering the driver and the platform device. 
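On the dev_pm_opp_set_prop_name() call in the probe above: the chosen name makes the OPP core prefer per-bin variants of table properties. The device-tree shape this implies, sketched as a comment with invented voltage values rather than copied from a real H6 dts:

	/*
	 * With prop name "speed1", opp-microvolt-speed1 wins:
	 *
	 *	opp-1488000000 {
	 *		opp-hz = /bits/ 64 <1488000000>;
	 *		opp-microvolt-speed0 = <1160000>;
	 *		opp-microvolt-speed1 = <1120000>;	// used
	 *		opp-microvolt-speed2 = <1100000>;
	 *	};
	 */
	opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);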
+ */ +static int __init sun50i_cpufreq_init(void) +{ + const struct of_device_id *match; + int ret; + + match = sun50i_cpufreq_match_node(); + if (!match) + return -ENODEV; + + ret = platform_driver_register(&sun50i_cpufreq_driver); + if (unlikely(ret < 0)) + return ret; + + sun50i_cpufreq_pdev = + platform_device_register_simple("sun50i-cpufreq-nvmem", + -1, NULL, 0); + ret = PTR_ERR_OR_ZERO(sun50i_cpufreq_pdev); + if (ret == 0) + return 0; + + platform_driver_unregister(&sun50i_cpufreq_driver); + return ret; +} +module_init(sun50i_cpufreq_init); + +static void __exit sun50i_cpufreq_exit(void) +{ + platform_device_unregister(sun50i_cpufreq_pdev); + platform_driver_unregister(&sun50i_cpufreq_driver); +} +module_exit(sun50i_cpufreq_exit); + +MODULE_DESCRIPTION("Sun50i-h6 cpufreq driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c index 4f0c637b3b49..7a1ea6fdcab6 100644 --- a/drivers/cpufreq/tegra124-cpufreq.c +++ b/drivers/cpufreq/tegra124-cpufreq.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> +#include <linux/cpufreq.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -128,8 +129,66 @@ out_put_np: return ret; } +static int __maybe_unused tegra124_cpufreq_suspend(struct device *dev) +{ + struct tegra124_cpufreq_priv *priv = dev_get_drvdata(dev); + int err; + + /* + * PLLP rate 408Mhz is below the CPU Fmax at Vmin and is safe to + * use during suspend and resume. So, switch the CPU clock source + * to PLLP and disable DFLL. + */ + err = clk_set_parent(priv->cpu_clk, priv->pllp_clk); + if (err < 0) { + dev_err(dev, "failed to reparent to PLLP: %d\n", err); + return err; + } + + clk_disable_unprepare(priv->dfll_clk); + + return 0; +} + +static int __maybe_unused tegra124_cpufreq_resume(struct device *dev) +{ + struct tegra124_cpufreq_priv *priv = dev_get_drvdata(dev); + int err; + + /* + * Warmboot code powers up the CPU with PLLP clock source. + * Enable DFLL clock and switch CPU clock source back to DFLL. 
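The ordering in the two tegra124 PM callbacks above is deliberate and mirrored, so the CPU is never left running from a gated clock. Condensed:

	/* suspend: reparent to the always-on PLLP first, then gate DFLL */
	clk_set_parent(priv->cpu_clk, priv->pllp_clk);
	clk_disable_unprepare(priv->dfll_clk);

	/* resume: ungate the DFLL first, then reparent back onto it */
	clk_prepare_enable(priv->dfll_clk);
	clk_set_parent(priv->cpu_clk, priv->dfll_clk);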
+ */ + err = clk_prepare_enable(priv->dfll_clk); + if (err < 0) { + dev_err(dev, "failed to enable DFLL clock for CPU: %d\n", err); + goto disable_cpufreq; + } + + err = clk_set_parent(priv->cpu_clk, priv->dfll_clk); + if (err < 0) { + dev_err(dev, "failed to reparent to DFLL clock: %d\n", err); + goto disable_dfll; + } + + return 0; + +disable_dfll: + clk_disable_unprepare(priv->dfll_clk); +disable_cpufreq: + disable_cpufreq(); + + return err; +} + +static const struct dev_pm_ops tegra124_cpufreq_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend, + tegra124_cpufreq_resume) +}; + static struct platform_driver tegra124_cpufreq_platdrv = { .driver.name = "cpufreq-tegra124", + .driver.pm = &tegra124_cpufreq_pm_ops, .probe = tegra124_cpufreq_probe, }; diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index bcecb068b51b..2e233ad72758 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) { struct tegra186_cpufreq_data *data; struct tegra_bpmp *bpmp; - struct resource *res; unsigned int i = 0, err; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); @@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) if (IS_ERR(bpmp)) return PTR_ERR(bpmp); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) { err = PTR_ERR(data->regs); goto put_bpmp; diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 2ad1ae17932d..557cb513bf7f 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -31,11 +31,17 @@ #define DRA7_EFUSE_OD_MPU_OPP BIT(1) #define DRA7_EFUSE_HIGH_MPU_OPP BIT(2) +#define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C +#define OMAP3_CONTROL_IDCODE 0x4830A204 +#define OMAP34xx_ProdID_SKUID 0x4830A20C +#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270) + #define VERSION_COUNT 2 struct ti_cpufreq_data; struct ti_cpufreq_soc_data { + const char * const *reg_names; unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data, unsigned long efuse); unsigned long efuse_fallback; @@ -77,6 +83,7 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data, case DRA7_EFUSE_HAS_ALL_MPU_OPP: case DRA7_EFUSE_HAS_HIGH_MPU_OPP: calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP; + /* Fall through */ case DRA7_EFUSE_HAS_OD_MPU_OPP: calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP; } @@ -84,6 +91,13 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data, return calculated_efuse; } +static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data, + unsigned long efuse) +{ + /* OPP enable bit ("Speed Binned") */ + return BIT(efuse); +} + static struct ti_cpufreq_soc_data am3x_soc_data = { .efuse_xlate = amx3_efuse_xlate, .efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ, @@ -111,6 +125,74 @@ static struct ti_cpufreq_soc_data dra7_soc_data = { .multi_regulator = true, }; +/* + * OMAP35x TRM (SPRUF98K): + * CONTROL_IDCODE (0x4830 A204) describes Silicon revisions. + * Control OMAP Status Register 15:0 (Address 0x4800 244C) + * to separate between omap3503, omap3515, omap3525, omap3530 + * and feature presence. + * There are encodings for versions limited to 400/266MHz + * but we ignore. + * Not clear if this also holds for omap34xx. + * some eFuse values e.g. 
CONTROL_FUSE_OPP1_VDD1 + * are stored in the SYSCON register range + * Register 0x4830A20C [ProdID.SKUID] [0:3] + * 0x0 for normal 600/430MHz device. + * 0x8 for 720/520MHz device. + * Not clear what omap34xx value is. + */ + +static struct ti_cpufreq_soc_data omap34xx_soc_data = { + .efuse_xlate = omap3_efuse_xlate, + .efuse_offset = OMAP34xx_ProdID_SKUID - OMAP3_SYSCON_BASE, + .efuse_shift = 3, + .efuse_mask = BIT(3), + .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, + .multi_regulator = false, +}; + +/* + * AM/DM37x TRM (SPRUGN4M) + * CONTROL_IDCODE (0x4830 A204) describes Silicon revisions. + * Control Device Status Register 15:0 (Address 0x4800 244C) + * to separate between am3703, am3715, dm3725, dm3730 + * and feature presence. + * Speed Binned = Bit 9 + * 0 800/600 MHz + * 1 1000/800 MHz + * some eFuse values e.g. CONTROL_FUSE_OPP 1G_VDD1 + * are stored in the SYSCON register range. + * There is no 0x4830A20C [ProdID.SKUID] register (exists but + * seems to always read as 0). + */ + +static const char * const omap3_reg_names[] = {"cpu0", "vbb"}; + +static struct ti_cpufreq_soc_data omap36xx_soc_data = { + .reg_names = omap3_reg_names, + .efuse_xlate = omap3_efuse_xlate, + .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE, + .efuse_shift = 9, + .efuse_mask = BIT(9), + .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, + .multi_regulator = true, +}; + +/* + * AM3517 is quite similar to AM/DM37x except that it has no + * high speed grade eFuse and no abb ldo + */ + +static struct ti_cpufreq_soc_data am3517_soc_data = { + .efuse_xlate = omap3_efuse_xlate, + .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE, + .efuse_shift = 0, + .efuse_mask = 0, + .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, + .multi_regulator = false, +}; + + /** * ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC * @opp_data: pointer to ti_cpufreq_data context @@ -127,7 +209,17 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data, ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset, &efuse); - if (ret) { + if (ret == -EIO) { + /* not a syscon register! */ + void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + + opp_data->soc_data->efuse_offset, 4); + + if (!regs) + return -ENOMEM; + efuse = readl(regs); + iounmap(regs); + } + else if (ret) { dev_err(dev, "Failed to read the efuse value from syscon: %d\n", ret); @@ -158,7 +250,17 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data, ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset, &revision); - if (ret) { + if (ret == -EIO) { + /* not a syscon register! 
*/ + void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + + opp_data->soc_data->rev_offset, 4); + + if (!regs) + return -ENOMEM; + revision = readl(regs); + iounmap(regs); + } + else if (ret) { dev_err(dev, "Failed to read the revision number from syscon: %d\n", ret); @@ -188,8 +290,14 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data) static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, + { .compatible = "ti,am3517", .data = &am3517_soc_data, }, { .compatible = "ti,am43", .data = &am4x_soc_data, }, { .compatible = "ti,dra7", .data = &dra7_soc_data }, + { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, }, + { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, }, + /* legacy */ + { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, }, + { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, }, {}, }; @@ -211,7 +319,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) const struct of_device_id *match; struct opp_table *ti_opp_table; struct ti_cpufreq_data *opp_data; - const char * const reg_names[] = {"vdd", "vbb"}; + const char * const default_reg_names[] = {"vdd", "vbb"}; int ret; match = dev_get_platdata(&pdev->dev); @@ -267,9 +375,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev) opp_data->opp_table = ti_opp_table; if (opp_data->soc_data->multi_regulator) { + const char * const *reg_names = default_reg_names; + + if (opp_data->soc_data->reg_names) + reg_names = opp_data->soc_data->reg_names; ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev, reg_names, - ARRAY_SIZE(reg_names)); + ARRAY_SIZE(default_reg_names)); if (IS_ERR(ti_opp_table)) { dev_pm_opp_put_supported_hw(opp_data->opp_table); ret = PTR_ERR(ti_opp_table); diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c index 707dbc1b7ac8..98d392196df2 100644 --- a/drivers/cpufreq/unicore2-cpufreq.c +++ b/drivers/cpufreq/unicore2-cpufreq.c @@ -22,7 +22,7 @@ static struct cpufreq_driver ucv2_driver; /* make sure that only the "userspace" governor is run * -- anything else wouldn't make sense on this platform, anyway. */ -static int ucv2_verify_speed(struct cpufreq_policy *policy) +static int ucv2_verify_speed(struct cpufreq_policy_data *policy) { if (policy->cpu) return -EINVAL; diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 53237289e606..83c85d3d67e3 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -1,61 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Versatile Express SPC CPUFreq Interface driver * - * It provides necessary ops to arm_big_little cpufreq driver. + * Copyright (C) 2013 - 2019 ARM Ltd. + * Sudeep Holla <sudeep.holla@arm.com> * - * Copyright (C) 2013 ARM Ltd. - * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * Copyright (C) 2013 Linaro. 
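Back on the ti-cpufreq hunk above: switching to ARRAY_SIZE(default_reg_names) is required rather than cosmetic, because reg_names is now a pointer and the kernel's ARRAY_SIZE() rejects pointers at compile time; it also quietly assumes any soc_data->reg_names override supplies exactly as many entries as the default pair. Illustrative:

	const char * const defaults[] = { "vdd", "vbb" };	/* array   */
	const char * const *names = defaults;			/* pointer */

	n = ARRAY_SIZE(defaults);	/* 2 */
	/* n = ARRAY_SIZE(names);	   fails __must_be_array() */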
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 53237289e606..83c85d3d67e3 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -1,61 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Versatile Express SPC CPUFreq Interface driver
  *
- * It provides necessary ops to arm_big_little cpufreq driver.
+ * Copyright (C) 2013 - 2019 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
  *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/device.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
 #include <linux/types.h>

-#include "arm_big_little.h"
+/* Currently we support only two clusters */
+#define A15_CLUSTER	0
+#define A7_CLUSTER	1
+#define MAX_CLUSTERS	2
+
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled()	bL_switching_enabled
+#define set_switching_enabled(x)	(bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled()	false
+#define set_switching_enabled(x)	do { } while (0)
+#define bL_switch_request(...)		do { } while (0)
+#define bL_switcher_put_enabled()	do { } while (0)
+#define bL_switcher_get_enabled()	do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
+static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
+static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
+{
+	return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+	return is_bL_switching_enabled() ?
+		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
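With the switcher (IKS) enabled, A7 frequencies are exposed to cpufreq at half their physical rate, and ACTUAL_FREQ()/VIRT_FREQ() convert between the two views. A host-side sanity check of that round trip, with the macros re-declared locally (and parenthesized) so it builds outside the kernel:

#include <assert.h>

#define A7_CLUSTER 1
#define ACTUAL_FREQ(cluster, freq) ((cluster) == A7_CLUSTER ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)   ((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))

int main(void)
{
	/* A 500 MHz virtual request maps to 1 GHz on the A7 clock... */
	assert(ACTUAL_FREQ(A7_CLUSTER, 500000u) == 1000000u);
	/* ...and translates back losslessly. */
	assert(VIRT_FREQ(A7_CLUSTER, 1000000u) == 500000u);
	return 0;
}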
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+	int j;
+	u32 max_freq = 0, cpu_freq;
+
+	for_each_online_cpu(j) {
+		cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+		if (cluster == per_cpu(physical_cluster, j) &&
+		    max_freq < cpu_freq)
+			max_freq = cpu_freq;
+	}
+
+	return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+	u32 cur_cluster = per_cpu(physical_cluster, cpu);
+	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+	/* For the switcher we use virtual A7 clock rates */
+	if (is_bL_switching_enabled())
+		rate = VIRT_FREQ(cur_cluster, rate);
+
+	return rate;
+}
+
+static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
+{
+	if (is_bL_switching_enabled())
+		return per_cpu(cpu_last_req_freq, cpu);
+	else
+		return clk_get_cpu_rate(cpu);
+}
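find_cluster_maxfreq() above implements the aggregation policy used by the set_rate path below: a physical cluster runs at the highest rate any of its CPUs last requested. A host-side model, with plain arrays standing in for the per-cpu variables and made-up frequencies:

#include <stdio.h>

#define NR_CPUS 4

static unsigned int physical_cluster[NR_CPUS] = { 0, 0, 1, 1 };
static unsigned int cpu_last_req_freq[NR_CPUS] = { 600000, 900000, 350000, 500000 };

static unsigned int find_cluster_maxfreq(unsigned int cluster)
{
	unsigned int j, max_freq = 0;

	for (j = 0; j < NR_CPUS; j++)
		if (physical_cluster[j] == cluster &&
		    cpu_last_req_freq[j] > max_freq)
			max_freq = cpu_last_req_freq[j];

	return max_freq;
}

int main(void)
{
	printf("cluster0: %u kHz\n", find_cluster_maxfreq(0));	/* 900000 */
	printf("cluster1: %u kHz\n", find_cluster_maxfreq(1));	/* 500000 */
	return 0;
}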
+ */ + if (clk_get_rate(clk[new_cluster]) != new_rate * 1000) + ret = -EIO; + } + + if (WARN_ON(ret)) { + if (bLs) { + per_cpu(cpu_last_req_freq, cpu) = prev_rate; + per_cpu(physical_cluster, cpu) = old_cluster; + } + + mutex_unlock(&cluster_lock[new_cluster]); + + return ret; + } + + mutex_unlock(&cluster_lock[new_cluster]); + + /* Recalc freq for old cluster when switching clusters */ + if (old_cluster != new_cluster) { + /* Switch cluster */ + bL_switch_request(cpu, new_cluster); + + mutex_lock(&cluster_lock[old_cluster]); + + /* Set freq of old cluster if there are cpus left on it */ + new_rate = find_cluster_maxfreq(old_cluster); + new_rate = ACTUAL_FREQ(old_cluster, new_rate); + + if (new_rate && + clk_set_rate(clk[old_cluster], new_rate * 1000)) { + pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n", + __func__, ret, old_cluster); + } + mutex_unlock(&cluster_lock[old_cluster]); + } + + return 0; +} + +/* Set clock frequency */ +static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int index) +{ + u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; + unsigned int freqs_new; + int ret; + + cur_cluster = cpu_to_cluster(cpu); + new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); + + freqs_new = freq_table[cur_cluster][index].frequency; + + if (is_bL_switching_enabled()) { + if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min) + new_cluster = A7_CLUSTER; + else if (actual_cluster == A7_CLUSTER && + freqs_new > clk_little_max) + new_cluster = A15_CLUSTER; + } + + ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster, + freqs_new); + + if (!ret) { + arch_set_freq_scale(policy->related_cpus, freqs_new, + policy->cpuinfo.max_freq); + } + + return ret; +} + +static inline u32 get_table_count(struct cpufreq_frequency_table *table) +{ + int count; + + for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++) + ; + + return count; +} + +/* get the minimum frequency in the cpufreq_frequency_table */ +static inline u32 get_table_min(struct cpufreq_frequency_table *table) +{ + struct cpufreq_frequency_table *pos; + u32 min_freq = ~0; + + cpufreq_for_each_entry(pos, table) + if (pos->frequency < min_freq) + min_freq = pos->frequency; + return min_freq; +} + +/* get the maximum frequency in the cpufreq_frequency_table */ +static inline u32 get_table_max(struct cpufreq_frequency_table *table) +{ + struct cpufreq_frequency_table *pos; + u32 max_freq = 0; + + cpufreq_for_each_entry(pos, table) + if (pos->frequency > max_freq) + max_freq = pos->frequency; + return max_freq; +} + +static bool search_frequency(struct cpufreq_frequency_table *table, int size, + unsigned int freq) +{ + int count; + + for (count = 0; count < size; count++) { + if (table[count].frequency == freq) + return true; + } + + return false; +} + +static int merge_cluster_tables(void) +{ + int i, j, k = 0, count = 1; + struct cpufreq_frequency_table *table; + + for (i = 0; i < MAX_CLUSTERS; i++) + count += get_table_count(freq_table[i]); + + table = kcalloc(count, sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + freq_table[MAX_CLUSTERS] = table; + + /* Add in reverse order to get freqs in increasing order */ + for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) { + for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END; + j++) { + if (i == A15_CLUSTER && + search_frequency(table, count, freq_table[i][j].frequency)) + continue; /* skip duplicates */ + table[k++].frequency = + VIRT_FREQ(i, freq_table[i][j].frequency); + } + } + + 
-static int ve_spc_init_opp_table(const struct cpumask *cpumask)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+					    const struct cpumask *cpumask)
 {
-	struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+	if (!freq_table[cluster])
+		return;
+
+	clk_put(clk[cluster]);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+					   const struct cpumask *cpumask)
+{
+	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	int i;
+
+	if (atomic_dec_return(&cluster_usage[cluster]))
+		return;
+
+	if (cluster < MAX_CLUSTERS)
+		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
+
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+
+		if (!cdev)
+			return;
+
+		_put_cluster_clk_and_freq_table(cdev, cpumask);
+	}
+
+	/* free virtual table */
+	kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+					   const struct cpumask *cpumask)
+{
+	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+	int ret;
+
+	if (freq_table[cluster])
+		return 0;
+
 	/*
 	 * platform specific SPC code must initialise the opp table
 	 * so just check if the OPP count is non-zero
 	 */
-	return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+	if (dev_pm_opp_get_opp_count(cpu_dev) <= 0) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+	if (ret)
+		goto out;
+
+	clk[cluster] = clk_get(cpu_dev, NULL);
+	if (!IS_ERR(clk[cluster]))
+		return 0;
+
+	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+		__func__, cpu_dev->id, cluster);
+	ret = PTR_ERR(clk[cluster]);
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+out:
+	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+		cluster);
+	return ret;
 }

-static int ve_spc_get_transition_latency(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+					  const struct cpumask *cpumask)
 {
-	return 1000000; /* 1 ms */
+	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	int i, ret;
+
+	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+		return 0;
+
+	if (cluster < MAX_CLUSTERS) {
+		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
+		if (ret)
+			atomic_dec(&cluster_usage[cluster]);
+		return ret;
+	}
+
+	/*
+	 * Get data for all clusters and fill virtual cluster with a merge of
+	 * both
+	 */
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+
+		if (!cdev)
+			return -ENODEV;
+
+		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
+		if (ret)
+			goto put_clusters;
+	}
+
+	ret = merge_cluster_tables();
+	if (ret)
+		goto put_clusters;
+
+	/* Assuming 2 cluster, set clk_big_min and clk_little_max */
+	clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
+	clk_little_max = VIRT_FREQ(A7_CLUSTER,
+				   get_table_max(freq_table[A7_CLUSTER]));
+
+	return 0;
+
+put_clusters:
+	for_each_present_cpu(i) {
+		struct device *cdev = get_cpu_device(i);
+
+		if (!cdev)
+			return -ENODEV;
+
+		_put_cluster_clk_and_freq_table(cdev, cpumask);
+	}
+
+	atomic_dec(&cluster_usage[cluster]);
+
+	return ret;
 }
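The get/put pair above uses cluster_usage as a first-on/last-off refcount: the first CPU of a cluster initializes the shared clock and frequency table, and the last one down frees them. A host-side model using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cluster_usage;

static void get_cluster(void)
{
	/* atomic_fetch_add() returns the old value; old + 1 == 1 means
	 * this caller is the first user, like atomic_inc_return(). */
	if (atomic_fetch_add(&cluster_usage, 1) + 1 != 1)
		return;			/* already initialized */
	printf("init clock and freq table\n");
}

static void put_cluster(void)
{
	if (atomic_fetch_sub(&cluster_usage, 1) - 1 != 0)
		return;			/* still in use */
	printf("free clock and freq table\n");
}

int main(void)
{
	get_cluster();	/* cpu0 up: init */
	get_cluster();	/* cpu1 up: no-op */
	put_cluster();	/* cpu1 down: no-op */
	put_cluster();	/* cpu0 down: free */
	return 0;
}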
-static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
-	.name	= "vexpress-spc",
-	.get_transition_latency = ve_spc_get_transition_latency,
-	.init_opp_table = ve_spc_init_opp_table,
+/* Per-CPU initialization */
+static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
+{
+	u32 cur_cluster = cpu_to_cluster(policy->cpu);
+	struct device *cpu_dev;
+	int ret;
+
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+		       policy->cpu);
+		return -ENODEV;
+	}
+
+	if (cur_cluster < MAX_CLUSTERS) {
+		int cpu;
+
+		dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
+
+		for_each_cpu(cpu, policy->cpus)
+			per_cpu(physical_cluster, cpu) = cur_cluster;
+	} else {
+		/* Assumption: during init, we are always running on A15 */
+		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+	}
+
+	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+	if (ret)
+		return ret;
+
+	policy->freq_table = freq_table[cur_cluster];
+	policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
+
+	dev_pm_opp_of_register_em(policy->cpus);
+
+	if (is_bL_switching_enabled())
+		per_cpu(cpu_last_req_freq, policy->cpu) =
+					clk_get_cpu_rate(policy->cpu);
+
+	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+	return 0;
+}
+
+static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	struct device *cpu_dev;
+	int cur_cluster = cpu_to_cluster(policy->cpu);
+
+	if (cur_cluster < MAX_CLUSTERS) {
+		cpufreq_cooling_unregister(cdev[cur_cluster]);
+		cdev[cur_cluster] = NULL;
+	}
+
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+		       policy->cpu);
+		return -ENODEV;
+	}
+
+	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
+	return 0;
+}
+
+static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	int cur_cluster = cpu_to_cluster(policy->cpu);
+
+	/* Do not register a cpu_cooling device if we are in IKS mode */
+	if (cur_cluster >= MAX_CLUSTERS)
+		return;
+
+	cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
+}
+
+static struct cpufreq_driver ve_spc_cpufreq_driver = {
+	.name			= "vexpress-spc",
+	.flags			= CPUFREQ_STICKY |
+				  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+				  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify			= cpufreq_generic_frequency_table_verify,
+	.target_index		= ve_spc_cpufreq_set_target,
+	.get			= ve_spc_cpufreq_get_rate,
+	.init			= ve_spc_cpufreq_init,
+	.exit			= ve_spc_cpufreq_exit,
+	.ready			= ve_spc_cpufreq_ready,
+	.attr			= cpufreq_generic_attr,
 };

+#ifdef CONFIG_BL_SWITCHER
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+					unsigned long action, void *_arg)
+{
+	pr_debug("%s: action: %ld\n", __func__, action);
+
+	switch (action) {
+	case BL_NOTIFY_PRE_ENABLE:
+	case BL_NOTIFY_PRE_DISABLE:
+		cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+		break;
+
+	case BL_NOTIFY_POST_ENABLE:
+		set_switching_enabled(true);
+		cpufreq_register_driver(&ve_spc_cpufreq_driver);
+		break;
+
+	case BL_NOTIFY_POST_DISABLE:
+		set_switching_enabled(false);
+		cpufreq_register_driver(&ve_spc_cpufreq_driver);
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+	.notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+static int __bLs_register_notifier(void)
+{
+	return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
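The notifier above takes the driver out of cpufreq for the duration of a switcher transition and re-registers it with the new switching mode latched. A host-side model of that action mapping (the enum constants are local stand-ins for the asm/bL_switcher.h values):

#include <stdbool.h>
#include <stdio.h>

enum { PRE_ENABLE, POST_ENABLE, PRE_DISABLE, POST_DISABLE };

static bool registered = true;
static bool switching;

static void notify(int action)
{
	switch (action) {
	case PRE_ENABLE:
	case PRE_DISABLE:
		registered = false;	/* cpufreq_unregister_driver() */
		break;
	case POST_ENABLE:
		switching = true;	/* IKS now active */
		registered = true;	/* cpufreq_register_driver() */
		break;
	case POST_DISABLE:
		switching = false;
		registered = true;
		break;
	}
	printf("registered=%d switching=%d\n", registered, switching);
}

int main(void)
{
	notify(PRE_ENABLE);
	notify(POST_ENABLE);
	notify(PRE_DISABLE);
	notify(POST_DISABLE);
	return 0;
}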
+
 static int ve_spc_cpufreq_probe(struct platform_device *pdev)
 {
-	return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+	int ret, i;
+
+	set_switching_enabled(bL_switcher_get_enabled());
+
+	for (i = 0; i < MAX_CLUSTERS; i++)
+		mutex_init(&cluster_lock[i]);
+
+	ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
+	if (ret) {
+		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+			__func__, ve_spc_cpufreq_driver.name, ret);
+	} else {
+		ret = __bLs_register_notifier();
+		if (ret)
+			cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+		else
+			pr_info("%s: Registered platform driver: %s\n",
+				__func__, ve_spc_cpufreq_driver.name);
+	}
+
+	bL_switcher_put_enabled();
+	return ret;
 }

 static int ve_spc_cpufreq_remove(struct platform_device *pdev)
 {
-	bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+	bL_switcher_get_enabled();
+	__bLs_unregister_notifier();
+	cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+	bL_switcher_put_enabled();
+	pr_info("%s: Un-registered platform driver: %s\n", __func__,
+		ve_spc_cpufreq_driver.name);
 	return 0;
 }

@@ -68,4 +599,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
 };
 module_platform_driver(ve_spc_cpufreq_platdrv);

-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
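For context, a sketch of how a platform could instantiate this driver: some platform code registers a matching platform_device, which triggers ve_spc_cpufreq_probe() above. The device name below is assumed to match the platform_driver's, and on vexpress this registration would live in the SPC support code rather than here:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init example_spc_cpufreq_register(void)
{
	struct platform_device *pdev;

	/* Name assumed to match ve_spc_cpufreq_platdrv's .name. */
	pdev = platform_device_register_simple("vexpress-spc-cpufreq",
					       -1, NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(example_spc_cpufreq_register);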