author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-22 09:08:39 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-22 09:08:39 -0700
commit    e6c81cce5699ec6be3a7533b5ad7a062ab3357f2
tree      4592735bbfb17f163217d0bb80877dcc22a869c0
parent    d0440c59f52d31aa7f74ba8e35cc22ee96acea84
parent    a018bb2ff95868ea592212b6735b35836fda268b
Merge tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC platform updates from Olof Johansson:
 "Our SoC branch usually contains expanded support for new SoCs and
  other core platform code. In this case, that includes:

   - support for the new Annapurna Labs "Alpine" platform

   - a rework greatly simplifying adding new platform support to the
     MCPM subsystem (Multi-cluster power management)

   - cpuidle and PM improvements for Exynos3250

   - misc updates for Renesas, OMAP, Meson, i.MX. Some of these could
     have gone in other branches but ended up here for various reasons"

* tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (53 commits)
  ARM: alpine: add support for generic pci
  ARM: Exynos: migrate DCSCB to the new MCPM backend abstraction
  ARM: vexpress: migrate DCSCB to the new MCPM backend abstraction
  ARM: vexpress: DCSCB: tighten CPU validity assertion
  ARM: vexpress: migrate TC2 to the new MCPM backend abstraction
  ARM: MCPM: move the algorithmic complexity to the core code
  ARM: EXYNOS: allow cpuidle driver usage on Exynos3250 SoC
  ARM: EXYNOS: add AFTR mode support for Exynos3250
  ARM: EXYNOS: add code for setting/clearing boot flag
  ARM: EXYNOS: fix CPU1 hotplug on Exynos3250
  ARM: S3C64XX: Use fixed IRQ bases to avoid conflicts on Cragganmore
  ARM: cygnus: fix const declaration bcm_cygnus_dt_compat
  ARM: DRA7: hwmod: Fix the hwmod class for GPTimer4
  ARM: DRA7: hwmod: Add data for GPTimers 13 through 16
  ARM: EXYNOS: Remove left over 'extra_save'
  ARM: EXYNOS: Constify exynos_pm_data array
  ARM: EXYNOS: use static in suspend.c
  ARM: EXYNOS: Use platform device name as power domain name
  ARM: EXYNOS: add support for async-bridge clocks for pm_domains
  ARM: omap-device: add missed callback for suspend-to-disk
  ...
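The MCPM rework mentioned above replaces the monolithic power_up/power_down
backend callbacks with finer-grained hooks driven by the core. Reconstructed
purely from the call sites visible in the diff below (not copied from
asm/mcpm.h, so exact layout and any extra members are assumptions), the new
ops table presumably looks roughly like this:

	struct mcpm_platform_ops {
		/* new fine-grained hooks; the *_powerup and *_powerdown_prepare
		 * ones are invoked by the core with mcpm_lock held */
		int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
		int (*cluster_powerup)(unsigned int cluster);
		void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
		void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
		void (*cluster_powerdown_prepare)(unsigned int cluster);
		/* cache teardown hooks, called after mcpm_lock is dropped */
		void (*cpu_cache_disable)(void);
		void (*cluster_cache_disable)(void);
		void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
		void (*cluster_is_up)(unsigned int cluster);
		/* legacy interface, still honored for backward compatibility */
		int (*power_up)(unsigned int cpu, unsigned int cluster);
		void (*power_down)(void);
		void (*suspend)(u64 expected_residency);
		void (*powered_up)(void);
	};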
Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/mcpm_entry.c | 202
1 file changed, 170 insertions(+), 32 deletions(-)
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 3c165fc2dce2..5f8a52ac7edf 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -55,22 +55,81 @@ bool mcpm_is_available(void)
return (platform_ops) ? true : false;
}
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number, which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
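+/* A cluster counts as unused once every CPU use count in it drops to zero. */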
+static inline bool mcpm_cluster_unused(unsigned int cluster)
+{
+ int i, cnt;
+ for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
+ cnt |= mcpm_cpu_use_count[cluster][i];
+ return !cnt;
+}
+
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
+ bool cpu_is_down, cluster_is_down;
+ int ret = 0;
+
if (!platform_ops)
return -EUNATCH; /* try not to shadow power_up errors */
might_sleep();
- return platform_ops->power_up(cpu, cluster);
+
+ /* backward compatibility callback */
+ if (platform_ops->power_up)
+ return platform_ops->power_up(cpu, cluster);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&mcpm_lock);
+
+ cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
+ cluster_is_down = mcpm_cluster_unused(cluster);
+
+ mcpm_cpu_use_count[cluster][cpu]++;
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up before it had a chance
+ * to actually power itself down.
+ * Any other value is a bug.
+ */
+ BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
+ mcpm_cpu_use_count[cluster][cpu] != 2);
+
+ if (cluster_is_down)
+ ret = platform_ops->cluster_powerup(cluster);
+ if (cpu_is_down && !ret)
+ ret = platform_ops->cpu_powerup(cpu, cluster);
+
+ arch_spin_unlock(&mcpm_lock);
+ local_irq_enable();
+ return ret;
}
typedef void (*phys_reset_t)(unsigned long);
void mcpm_cpu_power_down(void)
{
+ unsigned int mpidr, cpu, cluster;
+ bool cpu_going_down, last_man;
phys_reset_t phys_reset;
- if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
- return;
+ if (WARN_ON_ONCE(!platform_ops))
+ return;
BUG_ON(!irqs_disabled());
/*
@@ -79,28 +138,65 @@ void mcpm_cpu_power_down(void)
*/
setup_mm_for_reboot();
- platform_ops->power_down();
+ /* backward compatibility callback */
+ if (platform_ops->power_down) {
+ platform_ops->power_down();
+ goto not_dead;
+ }
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+ __mcpm_cpu_going_down(cpu, cluster);
+ arch_spin_lock(&mcpm_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+
+ mcpm_cpu_use_count[cluster][cpu]--;
+ BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
+ mcpm_cpu_use_count[cluster][cpu] != 1);
+ cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
+ last_man = mcpm_cluster_unused(cluster);
+
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ platform_ops->cpu_powerdown_prepare(cpu, cluster);
+ platform_ops->cluster_powerdown_prepare(cluster);
+ arch_spin_unlock(&mcpm_lock);
+ platform_ops->cluster_cache_disable();
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ if (cpu_going_down)
+ platform_ops->cpu_powerdown_prepare(cpu, cluster);
+ arch_spin_unlock(&mcpm_lock);
+ /*
+ * If cpu_going_down is false here, that means a power_up
+ * request raced ahead of us. Even if we do not want to
+ * shut this CPU down, the caller still expects execution
+ * to return through the system resume entry path, like
+ * when the WFI is aborted due to a new IRQ or the like.
+ * So let's continue with cache cleaning in all cases.
+ */
+ platform_ops->cpu_cache_disable();
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ if (cpu_going_down)
+ wfi();
+
+not_dead:
/*
* It is possible for a power_up request to happen concurrently
* with a power_down request for the same CPU. In this case the
- * power_down method might not be able to actually enter a
- * powered down state with the WFI instruction if the power_up
- * method has removed the required reset condition. The
- * power_down method is then allowed to return. We must perform
- * a re-entry in the kernel as if the power_up method just had
- * deasserted reset on the CPU.
- *
- * To simplify race issues, the platform specific implementation
- * must accommodate for the possibility of unordered calls to
- * power_down and power_up with a usage count. Therefore, if a
- * call to power_up is issued for a CPU that is not down, then
- * the next call to power_down must not attempt a full shutdown
- * but only do the minimum (normally disabling L1 cache and CPU
- * coherency) and return just as if a concurrent power_up request
- * had happened as described above.
+ * CPU might not be able to actually enter a powered down state
+ * with the WFI instruction if the power_up request has removed
+ * the required reset condition. We must perform a re-entry in
+ * the kernel as if the power_up method just had deasserted reset
+ * on the CPU.
*/
-
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
phys_reset(virt_to_phys(mcpm_entry_point));
@@ -125,26 +221,66 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
void mcpm_cpu_suspend(u64 expected_residency)
{
- phys_reset_t phys_reset;
-
- if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+ if (WARN_ON_ONCE(!platform_ops))
return;
- BUG_ON(!irqs_disabled());
- /* Very similar to mcpm_cpu_power_down() */
- setup_mm_for_reboot();
- platform_ops->suspend(expected_residency);
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
- phys_reset(virt_to_phys(mcpm_entry_point));
- BUG();
+ /* backward compatibility callback */
+ if (platform_ops->suspend) {
+ phys_reset_t phys_reset;
+ BUG_ON(!irqs_disabled());
+ setup_mm_for_reboot();
+ platform_ops->suspend(expected_residency);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+ BUG();
+ }
+
+ /* Some platforms might have to enable special resume modes, etc. */
+ if (platform_ops->cpu_suspend_prepare) {
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ arch_spin_lock(&mcpm_lock);
+ platform_ops->cpu_suspend_prepare(cpu, cluster);
+ arch_spin_unlock(&mcpm_lock);
+ }
+ mcpm_cpu_power_down();
}
int mcpm_cpu_powered_up(void)
{
+ unsigned int mpidr, cpu, cluster;
+ bool cpu_was_down, first_man;
+ unsigned long flags;
+
if (!platform_ops)
return -EUNATCH;
- if (platform_ops->powered_up)
+
+ /* backward compatibility callback */
+ if (platform_ops->powered_up) {
platform_ops->powered_up();
+ return 0;
+ }
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ local_irq_save(flags);
+ arch_spin_lock(&mcpm_lock);
+
+ cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
+ first_man = mcpm_cluster_unused(cluster);
+
+ if (first_man && platform_ops->cluster_is_up)
+ platform_ops->cluster_is_up(cluster);
+ if (cpu_was_down)
+ mcpm_cpu_use_count[cluster][cpu] = 1;
+ if (platform_ops->cpu_is_up)
+ platform_ops->cpu_is_up(cpu, cluster);
+
+ arch_spin_unlock(&mcpm_lock);
+ local_irq_restore(flags);
+
return 0;
}
@@ -334,8 +470,10 @@ int __init mcpm_sync_init(
}
mpidr = read_cpuid_mpidr();
this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- for_each_online_cpu(i)
+ for_each_online_cpu(i) {
+ mcpm_cpu_use_count[this_cluster][i] = 1;
mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+ }
mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
sync_cache_w(&mcpm_sync);
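
A note on what this rework buys a platform port: the use-count bookkeeping,
IRQ/lock discipline and first-man/last-man logic now live once in this core
file, so a backend only supplies hardware-specific hooks and registers them
with mcpm_platform_register(). A minimal sketch under the new scheme (all
my_* names are hypothetical and the actual power-controller programming is
elided; only the shape of the registration is taken from the MCPM code):

	#include <linux/init.h>
	#include <asm/mcpm.h>

	/* Hypothetical hooks: a real backend would program the SoC's
	 * power controller and reset registers here. */
	static int my_cpu_powerup(unsigned int cpu, unsigned int cluster)
	{
		return 0;	/* deassert reset on the given core */
	}

	static int my_cluster_powerup(unsigned int cluster)
	{
		return 0;	/* power on cluster logic, L2, interconnect port */
	}

	static void my_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) {}
	static void my_cluster_powerdown_prepare(unsigned int cluster) {}

	static void my_cpu_cache_disable(void)
	{
		/* flush and disable the local L1, leave SMP coherency */
	}

	static void my_cluster_cache_disable(void)
	{
		/* additionally flush/disable caches shared by the cluster */
	}

	static const struct mcpm_platform_ops my_mcpm_ops = {
		.cpu_powerup               = my_cpu_powerup,
		.cluster_powerup           = my_cluster_powerup,
		.cpu_powerdown_prepare     = my_cpu_powerdown_prepare,
		.cluster_powerdown_prepare = my_cluster_powerdown_prepare,
		.cpu_cache_disable         = my_cpu_cache_disable,
		.cluster_cache_disable     = my_cluster_cache_disable,
	};

	static int __init my_mcpm_init(void)
	{
		return mcpm_platform_register(&my_mcpm_ops);
	}
	early_initcall(my_mcpm_init);

Contrast this with the legacy power_up()/power_down() path kept above for
backward compatibility, where each backend had to reimplement the racy
use-count dance itself.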