Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/Makefile           |   2
-rw-r--r--  kernel/time/clockevents.c      |  24
-rw-r--r--  kernel/time/tick-broadcast.c   | 164
-rw-r--r--  kernel/time/tick-common.c      |  39
-rw-r--r--  kernel/time/tick-sched.h       |  10
-rw-r--r--  kernel/time/timekeeping.c      |  29
-rw-r--r--  kernel/time/timer.c            |   3
7 files changed, 166 insertions(+), 105 deletions(-)
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index ffc4cc3dcd47..49eca0beed32 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -12,5 +12,3 @@ obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
obj-$(CONFIG_TIMER_STATS) += timer_stats.o
obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
obj-$(CONFIG_TEST_UDELAY) += test_udelay.o
-
-$(obj)/time.o: $(objtree)/include/config/
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 08ccc3da3ca0..50eb107f1198 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -120,19 +120,25 @@ static int __clockevents_switch_state(struct clock_event_device *dev,
/* The clockevent device is getting replaced. Shut it down. */
case CLOCK_EVT_STATE_SHUTDOWN:
- return dev->set_state_shutdown(dev);
+ if (dev->set_state_shutdown)
+ return dev->set_state_shutdown(dev);
+ return 0;
case CLOCK_EVT_STATE_PERIODIC:
/* Core internal bug */
if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
return -ENOSYS;
- return dev->set_state_periodic(dev);
+ if (dev->set_state_periodic)
+ return dev->set_state_periodic(dev);
+ return 0;
case CLOCK_EVT_STATE_ONESHOT:
/* Core internal bug */
if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
return -ENOSYS;
- return dev->set_state_oneshot(dev);
+ if (dev->set_state_oneshot)
+ return dev->set_state_oneshot(dev);
+ return 0;
case CLOCK_EVT_STATE_ONESHOT_STOPPED:
/* Core internal bug */
@@ -471,18 +477,6 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
if (dev->features & CLOCK_EVT_FEAT_DUMMY)
return 0;
- /* New state-specific callbacks */
- if (!dev->set_state_shutdown)
- return -EINVAL;
-
- if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
- !dev->set_state_periodic)
- return -EINVAL;
-
- if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
- !dev->set_state_oneshot)
- return -EINVAL;
-
return 0;
}
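
These clockevents.c hunks make the set_state_* callbacks optional: a NULL callback is now treated as "nothing to do" and reported as success, which is why the corresponding registration-time sanity checks can be dropped. A minimal userspace sketch of that optional-callback dispatch, using hypothetical stand-in names rather than the kernel's types:

```c
#include <stdio.h>

#define FEAT_PERIODIC 0x1

enum dev_state { STATE_SHUTDOWN, STATE_PERIODIC };

struct event_device {
	const char *name;
	unsigned int features;
	int (*set_state_shutdown)(struct event_device *dev);
	int (*set_state_periodic)(struct event_device *dev);
};

/* NULL callback == nothing to do: report success instead of failing. */
static int switch_state(struct event_device *dev, enum dev_state state)
{
	switch (state) {
	case STATE_SHUTDOWN:
		return dev->set_state_shutdown ? dev->set_state_shutdown(dev) : 0;
	case STATE_PERIODIC:
		if (!(dev->features & FEAT_PERIODIC))
			return -1;	/* feature not advertised: core bug */
		return dev->set_state_periodic ? dev->set_state_periodic(dev) : 0;
	}
	return -1;
}

static int demo_shutdown(struct event_device *dev)
{
	printf("%s: shut down\n", dev->name);
	return 0;
}

int main(void)
{
	/* A driver that only implements the callback it needs. */
	struct event_device dev = {
		.name = "demo",
		.features = FEAT_PERIODIC,
		.set_state_shutdown = demo_shutdown,
		/* .set_state_periodic deliberately left NULL */
	};

	printf("periodic: %d (NULL callback, silent no-op)\n",
	       switch_state(&dev, STATE_PERIODIC));
	printf("shutdown: %d\n", switch_state(&dev, STATE_SHUTDOWN));
	return 0;
}
```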
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d39f32cdd1b5..f6aae7977824 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -159,7 +159,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
struct clock_event_device *bc = tick_broadcast_device.evtdev;
unsigned long flags;
- int ret;
+ int ret = 0;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -221,13 +221,14 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
* If we kept the cpu in the broadcast mask,
* tell the caller to leave the per cpu device
* in shutdown state. The periodic interrupt
- * is delivered by the broadcast device.
+ * is delivered by the broadcast device, if
+ * the broadcast device exists and is not
+ * hrtimer based.
*/
- ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+ if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+ ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
break;
default:
- /* Nothing to do */
- ret = 0;
break;
}
}
@@ -265,8 +266,22 @@ static bool tick_do_broadcast(struct cpumask *mask)
* Check, if the current cpu is in the mask
*/
if (cpumask_test_cpu(cpu, mask)) {
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
cpumask_clear_cpu(cpu, mask);
- local = true;
+ /*
+ * We only run the local handler, if the broadcast
+ * device is not hrtimer based. Otherwise we run into
+ * a hrtimer recursion.
+ *
+ * local timer_interrupt()
+ * local_handler()
+ * expire_hrtimers()
+ * bc_handler()
+ * local_handler()
+ * expire_hrtimers()
+ */
+ local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
}
if (!cpumask_empty(mask)) {
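
The recursion that the new comment warns about deserves a concrete illustration: with an hrtimer-based broadcast device, the broadcast handler is reached from the local tick's own hrtimer expiry, so invoking the local handler from inside it would loop straight back into the same path. A single-CPU userspace sketch of the guard, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

#define FEAT_HRTIMER 0x1

/* Pretend the broadcast device is backed by an hrtimer. */
static const unsigned int bc_features = FEAT_HRTIMER;

static void local_handler(void);

/*
 * Broadcast handler: ticks the CPUs in the broadcast mask. The local
 * CPU may only be ticked directly when the broadcast device is real
 * hardware; with an hrtimer-based device the local handler is what got
 * us here in the first place:
 *
 *   local_handler() -> expire hrtimers -> bc_handler() -> local_handler()
 */
static void bc_handler(void)
{
	bool local = !(bc_features & FEAT_HRTIMER);

	if (local)
		local_handler();
	else
		puts("hrtimer-based broadcast: skip the local handler");
}

/* Local tick handler: expiring hrtimers can fire the broadcast hrtimer. */
static void local_handler(void)
{
	puts("local tick");
	bc_handler();		/* stands in for the hrtimer expiry path */
}

int main(void)
{
	local_handler();	/* terminates only because of the guard above */
	return 0;
}
```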
@@ -301,6 +316,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
bool bc_local;
raw_spin_lock(&tick_broadcast_lock);
+
+ /* Handle spurious interrupts gracefully */
+ if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
+ raw_spin_unlock(&tick_broadcast_lock);
+ return;
+ }
+
bc_local = tick_do_periodic_broadcast();
if (clockevent_state_oneshot(dev)) {
@@ -359,8 +381,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
case TICK_BROADCAST_ON:
cpumask_set_cpu(cpu, tick_broadcast_on);
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
- if (tick_broadcast_device.mode ==
- TICKDEV_MODE_PERIODIC)
+ /*
+ * Only shutdown the cpu local device, if:
+ *
+ * - the broadcast device exists
+ * - the broadcast device is not a hrtimer based one
+ * - the broadcast device is in periodic mode to
 * avoid a hiccup during switch to oneshot mode
+ */
+ if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
+ tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
clockevents_shutdown(dev);
}
break;
@@ -379,14 +409,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
break;
}
- if (cpumask_empty(tick_broadcast_mask)) {
- if (!bc_stopped)
- clockevents_shutdown(bc);
- } else if (bc_stopped) {
- if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
- tick_broadcast_start_periodic(bc);
- else
- tick_broadcast_setup_oneshot(bc);
+ if (bc) {
+ if (cpumask_empty(tick_broadcast_mask)) {
+ if (!bc_stopped)
+ clockevents_shutdown(bc);
+ } else if (bc_stopped) {
+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ tick_broadcast_start_periodic(bc);
+ else
+ tick_broadcast_setup_oneshot(bc);
+ }
}
raw_spin_unlock(&tick_broadcast_lock);
}
@@ -662,71 +694,82 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
-/**
- * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
- * @state: The target state (enter/exit)
- *
- * The system enters/leaves a state, where affected devices might stop
- * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
- *
- * Called with interrupts disabled, so clockevents_lock is not
- * required here because the local clock event device cannot go away
- * under us.
- */
-int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
struct clock_event_device *bc, *dev;
- struct tick_device *td;
int cpu, ret = 0;
ktime_t now;
/*
- * Periodic mode does not care about the enter/exit of power
- * states
+ * If there is no broadcast device, tell the caller not to go
+ * into deep idle.
*/
- if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
- return 0;
+ if (!tick_broadcast_device.evtdev)
+ return -EBUSY;
- /*
- * We are called with preemtion disabled from the depth of the
- * idle code, so we can't be moved away.
- */
- td = this_cpu_ptr(&tick_cpu_device);
- dev = td->evtdev;
-
- if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
- return 0;
+ dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
raw_spin_lock(&tick_broadcast_lock);
bc = tick_broadcast_device.evtdev;
cpu = smp_processor_id();
if (state == TICK_BROADCAST_ENTER) {
+ /*
+ * If the current CPU owns the hrtimer broadcast
+ * mechanism, it cannot go deep idle and we do not add
+ * the CPU to the broadcast mask. We don't have to go
+ * through the EXIT path as the local timer is not
+ * shutdown.
+ */
+ ret = broadcast_needs_cpu(bc, cpu);
+ if (ret)
+ goto out;
+
+ /*
+ * If the broadcast device is in periodic mode, we
+ * return.
+ */
+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
+ /* If it is a hrtimer based broadcast, return busy */
+ if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
+ ret = -EBUSY;
+ goto out;
+ }
+
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
+
+ /* Conditionally shut down the local timer. */
broadcast_shutdown_local(bc, dev);
+
/*
* We only reprogram the broadcast timer if we
* did not mark ourself in the force mask and
* if the cpu local event is earlier than the
* broadcast event. If the current CPU is in
* the force mask, then we are going to be
- * woken by the IPI right away.
+ * woken by the IPI right away; we return
+ * busy, so the CPU does not try to go deep
+ * idle.
*/
- if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
- dev->next_event.tv64 < bc->next_event.tv64)
+ if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
+ ret = -EBUSY;
+ } else if (dev->next_event.tv64 < bc->next_event.tv64) {
tick_broadcast_set_event(bc, cpu, dev->next_event);
+ /*
+ * In case of hrtimer broadcasts the
+ * programming might have moved the
+ * timer to this cpu. If yes, remove
+ * us from the broadcast mask and
+ * return busy.
+ */
+ ret = broadcast_needs_cpu(bc, cpu);
+ if (ret) {
+ cpumask_clear_cpu(cpu,
+ tick_broadcast_oneshot_mask);
+ }
+ }
}
- /*
- * If the current CPU owns the hrtimer broadcast
- * mechanism, it cannot go deep idle and we remove the
- * CPU from the broadcast mask. We don't have to go
- * through the EXIT path as the local timer is not
- * shutdown.
- */
- ret = broadcast_needs_cpu(bc, cpu);
- if (ret)
- cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
} else {
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
@@ -796,7 +839,6 @@ out:
raw_spin_unlock(&tick_broadcast_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
/*
* Reset the one shot broadcast for a cpu
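
Pulling the rewritten ENTER path together: the CPU is refused deep idle with -EBUSY when no broadcast device exists, when it owns the hrtimer broadcast mechanism, when a periodic-mode broadcast device is hrtimer based, or when it sits in the force mask and the wakeup IPI is imminent. A condensed userspace sketch of that decision flow; the struct and its fields are hypothetical stand-ins, and the re-check after reprogramming possibly moved the broadcast hrtimer onto this CPU is omitted:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct enter_ctx {			/* condensed per-call state */
	bool have_bc;			/* a broadcast device exists */
	bool bc_is_hrtimer;		/* ...and it is hrtimer based */
	bool periodic_mode;		/* broadcast device runs periodic */
	bool owns_bc_hrtimer;		/* this CPU drives the broadcast hrtimer */
	bool in_force_mask;		/* a wakeup IPI is imminent */
};

/* Non-zero return: cpuidle must not enter a deep C-state. */
static int broadcast_enter(const struct enter_ctx *c)
{
	if (!c->have_bc)
		return -EBUSY;		/* nobody would wake us up */
	if (c->owns_bc_hrtimer)
		return -EBUSY;		/* we must keep ticking for the others */
	if (c->periodic_mode)
		return c->bc_is_hrtimer ? -EBUSY : 0;
	if (c->in_force_mask)
		return -EBUSY;		/* the IPI will arrive right away */
	return 0;			/* deep idle is fine */
}

int main(void)
{
	struct enter_ctx c = { .have_bc = true, .owns_bc_hrtimer = true };

	printf("enter -> %d (stay shallow)\n", broadcast_enter(&c));
	return 0;
}
```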
@@ -938,6 +980,16 @@ bool tick_broadcast_oneshot_available(void)
return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}
+#else
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
+ return -EBUSY;
+
+ return 0;
+}
#endif
void __init tick_broadcast_init(void)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 17f144450050..f8bf47571dda 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -19,6 +19,7 @@
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
+#include <trace/events/power.h>
#include <asm/irq_regs.h>
@@ -342,6 +343,28 @@ out_bc:
tick_install_broadcast_device(newdev);
}
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state: The target state (enter/exit)
+ *
+ * The system enters/leaves a state, where affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
+ */
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+ if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 0;
+
+ return __tick_broadcast_oneshot_control(state);
+}
+EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
+
#ifdef CONFIG_HOTPLUG_CPU
/*
* Transfer the do_timer job away from a dying cpu.
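
The newly exported wrapper keeps the cheap CLOCK_EVT_FEAT_C3STOP test in tick-common.c: CPUs whose local timer keeps ticking in deep idle return immediately, and only the rest fall into the locked slow path that now lives in __tick_broadcast_oneshot_control(). A userspace sketch of that fast-path/slow-path split, with stand-in names:

```c
#include <stdio.h>

#define FEAT_C3STOP 0x1

enum bc_state { BC_ENTER, BC_EXIT };

static unsigned int local_features;	/* per-CPU tick device features */

/* Internal slow path (tick-broadcast.c in the real tree). */
static int __broadcast_oneshot_control(enum bc_state state)
{
	printf("slow path: %s broadcast mode\n",
	       state == BC_ENTER ? "enter" : "exit");
	return 0;
}

/*
 * Exported wrapper (tick-common.c): if the local timer keeps ticking
 * in deep idle (no C3STOP), there is no broadcast bookkeeping to do
 * and no lock to take.
 */
static int broadcast_oneshot_control(enum bc_state state)
{
	if (!(local_features & FEAT_C3STOP))
		return 0;			/* fast path */
	return __broadcast_oneshot_control(state);
}

int main(void)
{
	broadcast_oneshot_control(BC_ENTER);	/* fast path: prints nothing */
	local_features = FEAT_C3STOP;
	broadcast_oneshot_control(BC_ENTER);	/* now takes the slow path */
	return 0;
}
```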
@@ -450,6 +473,7 @@ void tick_resume(void)
tick_resume_local();
}
+#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
@@ -467,10 +491,13 @@ void tick_freeze(void)
raw_spin_lock(&tick_freeze_lock);
tick_freeze_depth++;
- if (tick_freeze_depth == num_online_cpus())
+ if (tick_freeze_depth == num_online_cpus()) {
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), true);
timekeeping_suspend();
- else
+ } else {
tick_suspend_local();
+ }
raw_spin_unlock(&tick_freeze_lock);
}
@@ -488,15 +515,19 @@ void tick_unfreeze(void)
{
raw_spin_lock(&tick_freeze_lock);
- if (tick_freeze_depth == num_online_cpus())
+ if (tick_freeze_depth == num_online_cpus()) {
timekeeping_resume();
- else
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), false);
+ } else {
tick_resume_local();
+ }
tick_freeze_depth--;
raw_spin_unlock(&tick_freeze_lock);
}
+#endif /* CONFIG_SUSPEND */
/**
* tick_init - initialize the tick control
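
tick_freeze() and tick_unfreeze() share a depth counter so that only the last CPU to freeze suspends timekeeping and, symmetrically, the first CPU to unfreeze resumes it; the added trace events bracket exactly those two transitions. A minimal single-threaded sketch of the depth-counter pattern (the kernel serializes it with tick_freeze_lock):

```c
#include <stdio.h>

#define NUM_CPUS 4			/* stand-in for num_online_cpus() */

static unsigned int freeze_depth;	/* tick_freeze_lock protects this */

/* Called once per CPU on the way into suspend-to-idle. */
static void tick_freeze(int cpu)
{
	if (++freeze_depth == NUM_CPUS)
		printf("cpu%d: last one in, suspend timekeeping\n", cpu);
	else
		printf("cpu%d: suspend local tick only\n", cpu);
}

/* Called once per CPU on the way out; the mirror image of tick_freeze(). */
static void tick_unfreeze(int cpu)
{
	if (freeze_depth == NUM_CPUS)
		printf("cpu%d: first one out, resume timekeeping\n", cpu);
	else
		printf("cpu%d: resume local tick only\n", cpu);
	freeze_depth--;
}

int main(void)
{
	for (int cpu = 0; cpu < NUM_CPUS; cpu++)
		tick_freeze(cpu);
	for (int cpu = NUM_CPUS - 1; cpu >= 0; cpu--)
		tick_unfreeze(cpu);
	return 0;
}
```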
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 42fdf4958bcc..a4a8d4e9baa1 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -71,4 +71,14 @@ extern void tick_cancel_sched_timer(int cpu);
static inline void tick_cancel_sched_timer(int cpu) { }
#endif
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int
+__tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ return -EBUSY;
+}
+#endif
+
#endif
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 30b7a409bf1e..bca3667a2de1 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -319,32 +319,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
*
- * So we handle this differently than the other timekeeping accessor
- * functions which retry when the sequence count has changed. The
- * update side does:
- *
- * smp_wmb(); <- Ensure that the last base[1] update is visible
- * tkf->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
- * update(tkf->base[0], tkr);
- * smp_wmb(); <- Ensure that the base[0] update is visible
- * tkf->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
- * update(tkf->base[1], tkr);
- *
- * The reader side does:
- *
- * do {
- * seq = tkf->seq;
- * smp_rmb();
- * idx = seq & 0x01;
- * now = now(tkf->base[idx]);
- * smp_rmb();
- * } while (seq != tkf->seq)
- *
- * As long as we update base[0] readers are forced off to
- * base[1]. Once base[0] is updated readers are redirected to base[0]
- * and the base[1] update takes place.
+ * Employ the latch technique; see @raw_write_seqcount_latch.
*
* So if a NMI hits the update of base[0] then it will use base[1]
* which is still consistent. In the worst case this can result is a
@@ -407,7 +382,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
u64 now;
do {
- seq = raw_read_seqcount(&tkf->seq);
+ seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
} while (read_seqcount_retry(&tkf->seq, seq));
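
The replacement comment now just points at raw_write_seqcount_latch(), so here is a minimal userspace sketch of the latch technique the removed text described: the writer's two sequence increments steer readers away from whichever copy is being updated, so even an NMI that interrupts the writer reads a consistent copy. The C11 fences below only approximate the kernel's smp_wmb()/smp_rmb() barriers:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Two copies of the payload plus the sequence latch. */
struct fast_clock {
	atomic_uint seq;
	uint64_t base[2];
};

/*
 * Writer: each seq increment steers readers to the copy that is NOT
 * about to be touched; after two increments both copies are current.
 */
static void clock_update(struct fast_clock *fc, uint64_t val)
{
	atomic_fetch_add_explicit(&fc->seq, 1, memory_order_release);
	fc->base[0] = val;	/* odd seq: readers use base[1] */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&fc->seq, 1, memory_order_release);
	fc->base[1] = val;	/* even seq: readers are back on base[0] */
}

/*
 * Reader: pick the copy seq points at; retry only if the writer moved
 * on underneath us. An NMI-like reader that interrupts the writer sees
 * a stable seq, exits after one pass, and reads the idle copy.
 */
static uint64_t clock_read(struct fast_clock *fc)
{
	unsigned int seq;
	uint64_t val;

	do {
		seq = atomic_load_explicit(&fc->seq, memory_order_acquire);
		val = fc->base[seq & 0x01];
		atomic_thread_fence(memory_order_acquire);
	} while (seq != atomic_load_explicit(&fc->seq, memory_order_relaxed));

	return val;
}

int main(void)
{
	struct fast_clock fc = { .seq = 0 };

	clock_update(&fc, 42);
	printf("now = %llu\n", (unsigned long long)clock_read(&fc));
	return 0;
}
```

The trade-off is the one the surrounding comment spells out: in the worst case a reader sees a slightly stale value, never a torn one.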
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 520499dd85af..5e097fa9faf7 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1566,7 +1566,7 @@ static void migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
old_base = per_cpu_ptr(&tvec_bases, cpu);
- new_base = this_cpu_ptr(&tvec_bases);
+ new_base = get_cpu_ptr(&tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
@@ -1590,6 +1590,7 @@ static void migrate_timers(int cpu)
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
+ put_cpu_ptr(&tvec_bases);
}
static int timer_cpu_notify(struct notifier_block *self,
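
The timer.c change swaps this_cpu_ptr() for get_cpu_ptr()/put_cpu_ptr() because migrate_timers() runs in preemptible context: without pinning, the task could be migrated between looking up "this CPU" and locking its base. The helpers below are userspace stand-ins sketching the difference:

```c
#include <stdio.h>

#define NR_CPUS 4

struct tvec_base { int dummy; };
static struct tvec_base bases[NR_CPUS];

static int preempt_count;	/* stand-in for the kernel's preempt counter */
static int current_cpu;		/* stand-in for smp_processor_id() */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* this_cpu_ptr(): only safe if the caller already cannot migrate. */
static struct tvec_base *this_cpu_base(void)
{
	return &bases[current_cpu];
}

/*
 * get_cpu_ptr() = preempt_disable() + this_cpu_ptr(): pins the task so
 * "this CPU" cannot change until the matching put_cpu_ptr().
 */
static struct tvec_base *get_cpu_base(void)
{
	preempt_disable();
	return this_cpu_base();
}

static void put_cpu_base(void)
{
	preempt_enable();
}

int main(void)
{
	struct tvec_base *new_base = get_cpu_base();

	printf("migrating timers to %p, preempt_count=%d\n",
	       (void *)new_base, preempt_count);
	put_cpu_base();		/* re-enable preemption when done */
	return 0;
}
```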