author		Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 11:14:36 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 11:14:36 -0700
commit		f46e9913faeebcb6bd29edf795f12b60acbff171 (patch)
tree		1ed8871d0ebd638094d27317de1d8a53712ae15a /drivers/base
parent		8d91530c5fd7f0b1e8c4ddfea2905e55a178569b (diff)
parent		8d4b9d1bfef117862a2889dec4dac227068544c9 (diff)
download	blackbird-op-linux-f46e9913faeebcb6bd29edf795f12b60acbff171.tar.gz
		blackbird-op-linux-f46e9913faeebcb6bd29edf795f12b60acbff171.zip
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM / Runtime: Add runtime PM statistics (v3)
PM / Runtime: Make runtime_status attribute not debug-only (v. 2)
PM: Do not use dynamically allocated objects in pm_wakeup_event()
PM / Suspend: Fix ordering of calls in suspend error paths
PM / Hibernate: Fix snapshot error code path
PM / Hibernate: Fix hibernation_platform_enter()
pm_qos: Get rid of the allocation in pm_qos_add_request()
pm_qos: Reimplement using plists
plist: Add plist_last
PM: Make it possible to avoid races between wakeup and system sleep
PNPACPI: Add support for remote wakeup
PM: describe kernel policy regarding wakeup defaults (v. 2)
PM / Hibernate: Fix typos in comments in kernel/power/swap.c
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/Makefile	  2
-rw-r--r--	drivers/base/power/main.c	  1
-rw-r--r--	drivers/base/power/runtime.c	 54
-rw-r--r--	drivers/base/power/sysfs.c	 98
-rw-r--r--	drivers/base/power/wakeup.c	247
5 files changed, 374 insertions, 28 deletions
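
For orientation before the diff itself: the merged patches export runtime PM statistics and a per-device wakeup-event counter through sysfs, as read-only files under each device's power/ directory (runtime_status, runtime_active_time, runtime_suspended_time and, on CONFIG_PM_SLEEP kernels, wakeup_count). The sketch below is illustrative only and not part of the commit; the device path is a made-up placeholder, and the attribute names are taken from the sysfs.c changes that follow.

/*
 * Illustrative userspace sketch (not part of this commit): read the
 * power/ attributes added by the diff below.  The base path is a
 * hypothetical placeholder device.
 */
#include <stdio.h>

static void show_attr(const char *base, const char *name)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, name);
	f = fopen(path, "r");
	if (!f)
		return;		/* attribute not present on this kernel/device */
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", name, buf);
	fclose(f);
}

int main(void)
{
	/* Placeholder; substitute a real /sys/devices/.../power directory. */
	const char *base = "/sys/devices/platform/example.0/power";

	show_attr(base, "runtime_status");
	show_attr(base, "runtime_active_time");		/* milliseconds */
	show_attr(base, "runtime_suspended_time");	/* milliseconds */
	show_attr(base, "wakeup_count");
	return 0;
}

The two *_time attributes are reported in milliseconds because the show functions below convert the accumulated jiffies with jiffies_to_msecs().
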
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 89de75325cea..cbccf9a3cee4 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_PM)	+= sysfs.o
-obj-$(CONFIG_PM_SLEEP)	+= main.o
+obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_OPS)	+= generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 941fcb87e52a..5419a49ff135 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
 {
 	dev->power.status = DPM_ON;
 	init_completion(&dev->power.completion);
+	dev->power.wakeup_count = 0;
 	pm_runtime_init(dev);
 }
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b0ec0e9f27e9..b78c401ffa73 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -123,6 +123,45 @@ int pm_runtime_idle(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_idle);
 
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	int delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	if (delta < 0)
+		delta = 0;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
+
 /**
  * __pm_runtime_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
@@ -197,7 +236,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	dev->power.runtime_status = RPM_SUSPENDING;
+	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;
 
 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
@@ -228,7 +267,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	}
 
 	if (retval) {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (retval == -EAGAIN || retval == -EBUSY) {
 			if (dev->power.timer_expires == 0)
 				notify = true;
@@ -237,7 +276,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 			pm_runtime_cancel_pending(dev);
 		}
 	} else {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
 
 		if (dev->parent) {
@@ -381,7 +420,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	dev->power.runtime_status = RPM_RESUMING;
+	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
 		spin_unlock_irq(&dev->power.lock);
@@ -411,10 +450,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	}
 
 	if (retval) {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
 	} else {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (parent)
 			atomic_inc(&parent->power.child_count);
 	}
@@ -848,7 +887,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 	}
 
  out_set:
-	dev->power.runtime_status = status;
+	__update_runtime_status(dev, status);
 	dev->power.runtime_error = 0;
  out:
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1077,6 +1116,7 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
+	dev->power.accounting_timestamp = jiffies;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a4c33bc51257..e56b4388fe61 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,6 +6,7 @@
 #include <linux/string.h>
 #include <linux/pm_runtime.h>
 #include <asm/atomic.h>
+#include <linux/jiffies.h>
 #include "power.h"
 
 /*
@@ -73,6 +74,8 @@
  *	device are known to the PM core. However, for some devices this
  *	attribute is set to "enabled" by bus type code or device drivers and in
  *	that cases it should be safe to leave the default value.
+ *
+ *	wakeup_count - Report the number of wakeup events related to the device
  */
 
 static const char enabled[] = "enabled";
@@ -108,6 +111,65 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
 }
 
 static DEVICE_ATTR(control, 0644, control_show, control_store);
+
+static ssize_t rtpm_active_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+
+static ssize_t rtpm_suspended_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n",
+		jiffies_to_msecs(dev->power.suspended_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+
+static ssize_t rtpm_status_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char *p;
+
+	if (dev->power.runtime_error) {
+		p = "error\n";
+	} else if (dev->power.disable_depth) {
+		p = "unsupported\n";
+	} else {
+		switch (dev->power.runtime_status) {
+		case RPM_SUSPENDED:
+			p = "suspended\n";
+			break;
+		case RPM_SUSPENDING:
+			p = "suspending\n";
+			break;
+		case RPM_RESUMING:
+			p = "resuming\n";
+			break;
+		case RPM_ACTIVE:
+			p = "active\n";
+			break;
+		default:
+			return -EIO;
+		}
+	}
+	return sprintf(buf, p);
+}
+
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 #endif
 
 static ssize_t
@@ -144,6 +206,16 @@ wake_store(struct device * dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
 
+#ifdef CONFIG_PM_SLEEP
+static ssize_t wakeup_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", dev->power.wakeup_count);
+}
+
+static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+#endif
+
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 #ifdef CONFIG_PM_RUNTIME
 
@@ -172,27 +244,8 @@ static ssize_t rtpm_enabled_show(struct device *dev,
 	return sprintf(buf, "enabled\n");
 }
 
-static ssize_t rtpm_status_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	if (dev->power.runtime_error)
-		return sprintf(buf, "error\n");
-	switch (dev->power.runtime_status) {
-	case RPM_SUSPENDED:
-		return sprintf(buf, "suspended\n");
-	case RPM_SUSPENDING:
-		return sprintf(buf, "suspending\n");
-	case RPM_RESUMING:
-		return sprintf(buf, "resuming\n");
-	case RPM_ACTIVE:
-		return sprintf(buf, "active\n");
-	}
-	return -EIO;
-}
-
 static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
 static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
 
 #endif
@@ -228,14 +281,19 @@ static DEVICE_ATTR(async, 0644, async_show, async_store);
 static struct attribute * power_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_control.attr,
+	&dev_attr_runtime_status.attr,
+	&dev_attr_runtime_suspended_time.attr,
+	&dev_attr_runtime_active_time.attr,
 #endif
 	&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP
+	&dev_attr_wakeup_count.attr,
+#endif
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 	&dev_attr_async.attr,
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_runtime_usage.attr,
 	&dev_attr_runtime_active_kids.attr,
-	&dev_attr_runtime_status.attr,
 	&dev_attr_runtime_enabled.attr,
 #endif
 #endif
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 000000000000..eb594facfc3f
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,247 @@
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/capability.h>
+#include <linux/suspend.h>
+#include <linux/pm.h>
+
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled;
+
+/* The counter of registered wakeup events. */
+static unsigned long event_count;
+/* A preserved old value of event_count. */
+static unsigned long saved_event_count;
+/* The counter of wakeup events being processed. */
+static unsigned long events_in_progress;
+
+static DEFINE_SPINLOCK(events_lock);
+
+static void pm_wakeup_timer_fn(unsigned long data);
+
+static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
+static unsigned long events_timer_expires;
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended.  The moment this period
+ * will end depends on how the wakeup event is going to be processed after
+ * being detected and all of the possible cases can be divided into two
+ * distinct groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user
+ * space for further processing.  In that case the functional unit that has
+ * detected the event may later "close" the "no suspend" period associated with
+ * it directly as soon as it has been dealt with.  The pair of pm_stay_awake()
+ * and pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one.  In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing.  This
+ * knowledge, however, may not be available to it, so it can simply specify
+ * time to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ */
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
+ * counter of wakeup events being processed.  If @dev is not NULL, the counter
+ * of wakeup events related to @dev is incremented too.
+ *
+ * Call this function after detecting of a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_stay_awake(struct device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (dev)
+		dev->power.wakeup_count++;
+
+	events_in_progress++;
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ *
+ * Notify the PM core that a wakeup event has been processed by decrementing
+ * the counter of wakeup events being processed and incrementing the counter
+ * of registered wakeup events.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void pm_relax(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_in_progress) {
+		events_in_progress--;
+		event_count++;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ *
+ * Decrease the counter of wakeup events being processed after it was increased
+ * by pm_wakeup_event().
+ */
+static void pm_wakeup_timer_fn(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_timer_expires
+	    && time_before_eq(events_timer_expires, jiffies)) {
+		events_in_progress--;
+		events_timer_expires = 0;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) that will take
+ * approximately @msec milliseconds to be processed by the kernel.  Increment
+ * the counter of registered wakeup events and (if @msec is nonzero) set up
+ * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
+ * timer has not been set up already, increment the counter of wakeup events
+ * being processed).  If @dev is not NULL, the counter of wakeup events related
+ * to @dev is incremented too.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	event_count++;
+	if (dev)
+		dev->power.wakeup_count++;
+
+	if (msec) {
+		unsigned long expires;
+
+		expires = jiffies + msecs_to_jiffies(msec);
+		if (!expires)
+			expires = 1;
+
+		if (!events_timer_expires
+		    || time_after(expires, events_timer_expires)) {
+			if (!events_timer_expires)
+				events_in_progress++;
+
+			mod_timer(&events_timer, expires);
+			events_timer_expires = expires;
+		}
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_check_wakeup_events - Check for new wakeup events.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past to check if new wakeup events have been registered since
+ * the old value was stored.  Check if the current number of wakeup events being
+ * processed is zero.
+ */
+bool pm_check_wakeup_events(void)
+{
+	unsigned long flags;
+	bool ret = true;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_check_enabled) {
+		ret = (event_count == saved_event_count) && !events_in_progress;
+		events_check_enabled = ret;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+	return ret;
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ *
+ * Store the number of registered wakeup events at the address in @count.  Block
+ * if the current number of wakeup events being processed is nonzero.
+ *
+ * Return false if the wait for the number of wakeup events being processed to
+ * drop down to zero has been interrupted by a signal (and the current number
+ * of wakeup events being processed is still nonzero).  Otherwise return true.
+ */
+bool pm_get_wakeup_count(unsigned long *count)
+{
+	bool ret;
+
+	spin_lock_irq(&events_lock);
+	if (capable(CAP_SYS_ADMIN))
+		events_check_enabled = false;
+
+	while (events_in_progress && !signal_pending(current)) {
+		spin_unlock_irq(&events_lock);
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		spin_lock_irq(&events_lock);
+	}
+	*count = event_count;
+	ret = !events_in_progress;
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events to be used by pm_check_wakeup_events()
+ * and return true.  Otherwise return false.
+ */
+bool pm_save_wakeup_count(unsigned long count)
+{
+	bool ret = false;
+
+	spin_lock_irq(&events_lock);
+	if (count == event_count && !events_in_progress) {
+		saved_event_count = count;
+		events_check_enabled = true;
+		ret = true;
+	}
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
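
Read together with the comment block at the top of wakeup.c, the driver-side usage the new file describes comes down to the following sketch. It is not taken from the commit: the interrupt handler, its names and the chosen headers are assumptions, and only pm_stay_awake(), pm_relax() and pm_wakeup_event() come from the code above (note that pm_relax() takes no arguments in this version).

/*
 * Hypothetical driver sketch (not from this commit) showing the two
 * usage patterns described in wakeup.c; header locations are assumed.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>

static irqreturn_t example_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	/*
	 * Pattern 1: this unit detects and also finishes processing the
	 * event, so it opens and closes the "no suspend" period itself.
	 */
	pm_stay_awake(dev);
	/* ... handle the wakeup event ... */
	pm_relax();

	/*
	 * Pattern 2: another unit will finish the processing; ask the PM
	 * core to hold off system suspend for an estimated 100 ms.
	 */
	pm_wakeup_event(dev, 100);

	return IRQ_HANDLED;
}

On the suspend side, pm_get_wakeup_count() and pm_save_wakeup_count() let the code initiating suspend snapshot event_count beforehand, and pm_check_wakeup_events() then fails the transition if new events were registered, or are still being processed, in the meantime.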