Diffstat (limited to 'arch/arm/mach-msm')
-rw-r--r--  arch/arm/mach-msm/timer.c | 124
1 file changed, 69 insertions(+), 55 deletions(-)
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 8697cfc0d0b6..a7afbacae61a 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -16,6 +16,7 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -26,7 +27,6 @@
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
-#include <asm/localtimer.h>
#include "common.h"
@@ -49,7 +49,7 @@ static void __iomem *sts_base;
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+ struct clock_event_device *evt = dev_id;
/* Stop the timer tick */
if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
@@ -101,18 +101,7 @@ static void msm_timer_set_mode(enum clock_event_mode mode,
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
}
-static struct clock_event_device msm_clockevent = {
- .name = "gp_timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_next_event = msm_timer_set_next_event,
- .set_mode = msm_timer_set_mode,
-};
-
-static union {
- struct clock_event_device *evt;
- struct clock_event_device * __percpu *percpu_evt;
-} msm_evt;
+static struct clock_event_device __percpu *msm_evt;
static void __iomem *source_base;
@@ -138,23 +127,34 @@ static struct clocksource msm_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-#ifdef CONFIG_LOCAL_TIMERS
+static int msm_timer_irq;
+static int msm_timer_has_ppi;
+
static int msm_local_timer_setup(struct clock_event_device *evt)
{
- /* Use existing clock_event for cpu 0 */
- if (!smp_processor_id())
- return 0;
-
- evt->irq = msm_clockevent.irq;
- evt->name = "local_timer";
- evt->features = msm_clockevent.features;
- evt->rating = msm_clockevent.rating;
+ int cpu = smp_processor_id();
+ int err;
+
+ evt->irq = msm_timer_irq;
+ evt->name = "msm_timer";
+ evt->features = CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = 200;
evt->set_mode = msm_timer_set_mode;
evt->set_next_event = msm_timer_set_next_event;
+ evt->cpumask = cpumask_of(cpu);
+
+ clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);
+
+ if (msm_timer_has_ppi) {
+ enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
+ } else {
+ err = request_irq(evt->irq, msm_timer_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING |
+ IRQF_TRIGGER_RISING, "gp_timer", evt);
+ if (err)
+ pr_err("request_irq failed\n");
+ }
- *__this_cpu_ptr(msm_evt.percpu_evt) = evt;
- clockevents_config_and_register(evt, GPT_HZ, 4, 0xf0000000);
- enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
return 0;
}
@@ -164,11 +164,28 @@ static void msm_local_timer_stop(struct clock_event_device *evt)
disable_percpu_irq(evt->irq);
}
-static struct local_timer_ops msm_local_timer_ops = {
- .setup = msm_local_timer_setup,
- .stop = msm_local_timer_stop,
+static int msm_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ msm_local_timer_setup(this_cpu_ptr(msm_evt));
+ break;
+ case CPU_DYING:
+ msm_local_timer_stop(this_cpu_ptr(msm_evt));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block msm_timer_cpu_nb = {
+ .notifier_call = msm_timer_cpu_notify,
};
-#endif /* CONFIG_LOCAL_TIMERS */
static notrace u32 msm_sched_clock_read(void)
{
@@ -178,38 +195,35 @@ static notrace u32 msm_sched_clock_read(void)
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
bool percpu)
{
- struct clock_event_device *ce = &msm_clockevent;
struct clocksource *cs = &msm_clocksource;
- int res;
+ int res = 0;
+
+ msm_timer_irq = irq;
+ msm_timer_has_ppi = percpu;
+
+ msm_evt = alloc_percpu(struct clock_event_device);
+ if (!msm_evt) {
+ pr_err("memory allocation failed for clockevents\n");
+ goto err;
+ }
- ce->cpumask = cpumask_of(0);
- ce->irq = irq;
+ if (percpu)
+ res = request_percpu_irq(irq, msm_timer_interrupt,
+ "gp_timer", msm_evt);
- clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff);
- if (percpu) {
- msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
- if (!msm_evt.percpu_evt) {
- pr_err("memory allocation failed for %s\n", ce->name);
+ if (res) {
+ pr_err("request_percpu_irq failed\n");
+ } else {
+ res = register_cpu_notifier(&msm_timer_cpu_nb);
+ if (res) {
+ free_percpu_irq(irq, msm_evt);
goto err;
}
- *__this_cpu_ptr(msm_evt.percpu_evt) = ce;
- res = request_percpu_irq(ce->irq, msm_timer_interrupt,
- ce->name, msm_evt.percpu_evt);
- if (!res) {
- enable_percpu_irq(ce->irq, IRQ_TYPE_EDGE_RISING);
-#ifdef CONFIG_LOCAL_TIMERS
- local_timer_register(&msm_local_timer_ops);
-#endif
- }
- } else {
- msm_evt.evt = ce;
- res = request_irq(ce->irq, msm_timer_interrupt,
- IRQF_TIMER | IRQF_NOBALANCING |
- IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
+
+ /* Immediately configure the timer on the boot CPU */
+ msm_local_timer_setup(__this_cpu_ptr(msm_evt));
}
- if (res)
- pr_err("request_irq failed for %s\n", ce->name);
err:
writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
res = clocksource_register_hz(cs, dgt_hz);
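
For reference, below is a hypothetical, self-contained sketch (not taken from timer.c) of the pattern this patch switches to: a clock_event_device allocated per CPU and brought up from a CPU_STARTING notifier, replacing the removed local_timer_ops hook. All "example_" names, the empty callback bodies, and the 19.2 MHz clock rate are illustrative assumptions, not code from the commit.

/*
 * Illustrative sketch of per-CPU clockevents driven by a CPU notifier.
 * Names and rates are placeholders; the real driver programs MSM
 * timer hardware in its set_mode/set_next_event callbacks.
 */
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static struct clock_event_device __percpu *example_evt;

static int example_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	/* program the hardware comparator 'cycles' ticks from now */
	return 0;
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	/* start or stop the hardware timer as requested */
}

static void example_local_timer_setup(struct clock_event_device *evt)
{
	evt->name = "example_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 200;
	evt->set_mode = example_set_mode;
	evt->set_next_event = example_set_next_event;
	evt->cpumask = cpumask_of(smp_processor_id());
	/* 19.2 MHz is a placeholder clock rate */
	clockevents_config_and_register(evt, 19200000, 4, 0xffffffff);
}

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		/* runs on the CPU that is coming online */
		example_local_timer_setup(this_cpu_ptr(example_evt));
		break;
	case CPU_DYING:
		clockevents_set_mode(this_cpu_ptr(example_evt),
				     CLOCK_EVT_MODE_UNUSED);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};

static void __init example_timer_init(void)
{
	example_evt = alloc_percpu(struct clock_event_device);
	if (!example_evt)
		return;
	register_cpu_notifier(&example_cpu_nb);
	/* the boot CPU is already running, so configure it directly */
	example_local_timer_setup(this_cpu_ptr(example_evt));
}

The notifier path is why the union of a single clock_event_device pointer and a percpu pointer in the old code can be replaced by one __percpu allocation: each CPU owns its own structure, and the boot CPU is set up directly at init time since it never passes through CPU_STARTING.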