author		Thomas Gleixner <tglx@linutronix.de>	2008-09-16 11:32:50 -0700
committer	Thomas Gleixner <tglx@linutronix.de>	2008-09-16 13:47:02 -0700
commit		2344abbcbdb82140050e8be29d3d55e4f6fe860b (patch)
tree		46c1842fc2a47aa4d7ee0c2c558f54bc50772b69 /kernel
parent		f1926ce63b996b42772b39e4b47bb4ef4ba748b4 (diff)
clockevents: make device shutdown robust
Shutting the device down does not clean up the next_event variable of the clock event device. So when the device is reactivated, the stale next_event value can prevent the device from being reprogrammed, because it claims to already be waiting on an event.

This is the root cause of the resurfacing suspend/resume problem, where systems need a key press to come back to life.

Fix this by setting next_event to KTIME_MAX when the device is shut down. Use a separate shutdown function which takes care of that, and keep the direct set-mode call only in the broadcast code, where we cannot touch the next_event value.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
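To make the failure mode concrete, here is a minimal, self-contained C sketch of the scenario described above. The struct, the reprogram guard, and KTIME_MAX are simplified stand-ins for illustration only, not the real kernel definitions; the actual reprogramming checks live elsewhere in the clockevents/tick code.

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

struct clock_event_device {
	int64_t next_event;	/* absolute expiry in ns; KTIME_MAX means "not armed" */
};

/* Simplified reprogram path: skip if the device already claims an earlier expiry. */
static int program_event(struct clock_event_device *dev, int64_t expires)
{
	if (expires >= dev->next_event)
		return 0;	/* looks armed already, reprogramming is silently skipped */
	dev->next_event = expires;
	return 1;		/* here the hardware would be reprogrammed */
}

/* Old behaviour: shutdown changes the mode but leaves next_event stale. */
static void shutdown_stale(struct clock_event_device *dev)
{
	(void)dev;		/* nothing cleared */
}

/* New behaviour, mirroring what clockevents_shutdown() adds: clear next_event too. */
static void shutdown_clean(struct clock_event_device *dev)
{
	dev->next_event = KTIME_MAX;
}

int main(void)
{
	struct clock_event_device dev = { .next_event = 1000 };

	shutdown_stale(&dev);
	printf("stale shutdown, program 2000 -> %d\n", program_event(&dev, 2000));	/* 0: skipped */

	shutdown_clean(&dev);
	printf("clean shutdown, program 2000 -> %d\n", program_event(&dev, 2000));	/* 1: armed again */
	return 0;
}

With the stale shutdown the later reprogram request is dropped, which is the "needs a key press to resume" symptom; clearing next_event on shutdown lets the device be armed again.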
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/time/clockevents.c	12
-rw-r--r--	kernel/time/tick-broadcast.c	 9
-rw-r--r--	kernel/time/tick-common.c	 4
-rw-r--r--	kernel/time/tick-internal.h	 2
4 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1876b526c778..f8d968063cea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,6 +72,16 @@ void clockevents_set_mode(struct clock_event_device *dev,
 }
 
 /**
+ * clockevents_shutdown - shutdown the device and clear next_event
+ * @dev:	device to shutdown
+ */
+void clockevents_shutdown(struct clock_event_device *dev)
+{
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+	dev->next_event.tv64 = KTIME_MAX;
+}
+
+/**
  * clockevents_program_event - Reprogram the clock event device.
  * @expires:	absolute expiry time (monotonic clock)
  *
@@ -206,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 
 	if (new) {
 		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
-		clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(new);
 	}
 	local_irq_restore(flags);
 }
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2f5a38294bf9..f1f3eee28113 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -236,8 +236,7 @@ static void tick_do_broadcast_on_off(void *why)
 		if (!cpu_isset(cpu, tick_broadcast_mask)) {
 			cpu_set(cpu, tick_broadcast_mask);
 			if (td->mode == TICKDEV_MODE_PERIODIC)
-				clockevents_set_mode(dev,
-						     CLOCK_EVT_MODE_SHUTDOWN);
+				clockevents_shutdown(dev);
 		}
 		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
 			tick_broadcast_force = 1;
@@ -254,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why)
 
 	if (cpus_empty(tick_broadcast_mask)) {
 		if (!bc_stopped)
-			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+			clockevents_shutdown(bc);
 	} else if (bc_stopped) {
 		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
 			tick_broadcast_start_periodic(bc);
@@ -306,7 +305,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
 		if (bc && cpus_empty(tick_broadcast_mask))
-			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+			clockevents_shutdown(bc);
 	}
 
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -321,7 +320,7 @@ void tick_suspend_broadcast(void)
 
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
-		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(bc);
 
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index c4777193d567..019315ebf9de 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -249,7 +249,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	 * not give it back to the clockevents layer !
 	 */
 	if (tick_is_broadcast_device(curdev)) {
-		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+		clockevents_shutdown(curdev);
 		curdev = NULL;
 	}
 	clockevents_exchange_device(curdev, newdev);
@@ -311,7 +311,7 @@ static void tick_suspend(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&tick_device_lock, flags);
-	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+	clockevents_shutdown(td->evtdev);
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 0ffc2918ea6f..6e9db9734aa6 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -10,6 +10,8 @@ extern int tick_do_timer_cpu __read_mostly;
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
 
+extern void clockevents_shutdown(struct clock_event_device *dev);
+
 /*
  * NO_HZ / high resolution timer shared code
  */