author     Rafael J. Wysocki <rjw@sisk.pl>    2010-09-22 22:09:10 +0200
committer  Rafael J. Wysocki <rjw@sisk.pl>    2010-10-17 01:57:43 +0200
commit     074037ec79bea73edf1b1ec72fef1010e83e3cc5 (patch)
tree       1876caa07f755e680da5ea102d985efe4b508d56 /drivers/base
parent     0702d9ee0f1dcb6258789032f03b3208bfafaffc (diff)
PM / Wakeup: Introduce wakeup source objects and event statistics (v3)
Introduce struct wakeup_source for representing system wakeup sources within the kernel and for collecting statistics related to them. Make the recently introduced helper functions pm_wakeup_event(), pm_stay_awake() and pm_relax() use struct wakeup_source objects internally, so that wakeup statistics associated with wakeup devices can be collected and reported in a consistent way (the definition of pm_relax() is changed, which is harmless, because this function is not called directly by anyone yet).

Introduce new wakeup-related sysfs device attributes in /sys/devices/.../power for reporting the device wakeup statistics.

Change the global wakeup events counters event_count and events_in_progress into atomic variables, so that it is not necessary to acquire a global spinlock in pm_wakeup_event(), pm_stay_awake() and pm_relax(), which should allow us to avoid lock contention in these functions on SMP systems with many wakeup devices.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
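For orientation, here is a minimal usage sketch of the reworked helpers (the foo_* driver names and the 100 ms estimate are hypothetical and not part of this patch). A driver marks its device wakeup-capable once, then either brackets event processing with pm_stay_awake()/pm_relax() or reports an event with an estimated processing time via pm_wakeup_event():

/*
 * Usage sketch only; the foo_* driver and the 100 ms estimate are
 * hypothetical.  The helpers below are the ones this patch reworks.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct device *dev)
{
	/* Mark the device wakeup-capable and attach a wakeup source to it. */
	return device_init_wakeup(dev, true);
}

static irqreturn_t foo_wakeup_irq(int irq, void *data)
{
	struct device *dev = data;

	/*
	 * Report a wakeup event and give the system ~100 ms to process it;
	 * the per-source timer ends the "no suspend" period automatically.
	 * Alternatively, pm_stay_awake(dev) / pm_relax(dev) could bracket
	 * the processing explicitly.
	 */
	pm_wakeup_event(dev, 100);
	return IRQ_HANDLED;
}

static void foo_remove(struct device *dev)
{
	/* Detach and destroy the device's wakeup source on removal. */
	device_init_wakeup(dev, false);
}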
Diffstat (limited to 'drivers/base')
-rw-r--r--   drivers/base/power/main.c      4
-rw-r--r--   drivers/base/power/power.h     1
-rw-r--r--   drivers/base/power/runtime.c   2
-rw-r--r--   drivers/base/power/sysfs.c   121
-rw-r--r--   drivers/base/power/wakeup.c  528
5 files changed, 573 insertions, 83 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db677199403c..7ae6fe414c38 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -60,7 +60,8 @@ void device_pm_init(struct device *dev)
dev->power.status = DPM_ON;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
- dev->power.wakeup_count = 0;
+ dev->power.wakeup = NULL;
+ spin_lock_init(&dev->power.lock);
pm_runtime_init(dev);
}
@@ -120,6 +121,7 @@ void device_pm_remove(struct device *dev)
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
+ device_wakeup_disable(dev);
pm_runtime_remove(dev);
}
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c0bd03c83b9c..8b2745c69e2a 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -34,6 +34,7 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_init(struct device *dev)
{
+ spin_lock_init(&dev->power.lock);
pm_runtime_init(dev);
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b78c401ffa73..e9520ad2d716 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1099,8 +1099,6 @@ EXPORT_SYMBOL_GPL(pm_runtime_allow);
*/
void pm_runtime_init(struct device *dev)
{
- spin_lock_init(&dev->power.lock);
-
dev->power.runtime_status = RPM_SUSPENDED;
dev->power.idle_notification = false;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e56b4388fe61..8859780817e1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -210,11 +210,122 @@ static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
static ssize_t wakeup_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%lu\n", dev->power.wakeup_count);
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->event_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
-#endif
+
+static ssize_t wakeup_active_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->active_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+
+static ssize_t wakeup_hit_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->hit_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL);
+
+static ssize_t wakeup_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int active = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ active = dev->power.wakeup->active;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+
+static ssize_t wakeup_total_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->total_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+
+static ssize_t wakeup_max_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->max_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+
+static ssize_t wakeup_last_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->last_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_RUNTIME
@@ -288,6 +399,12 @@ static struct attribute * power_attrs[] = {
&dev_attr_wakeup.attr,
#ifdef CONFIG_PM_SLEEP
&dev_attr_wakeup_count.attr,
+ &dev_attr_wakeup_active_count.attr,
+ &dev_attr_wakeup_hit_count.attr,
+ &dev_attr_wakeup_active.attr,
+ &dev_attr_wakeup_total_time_ms.attr,
+ &dev_attr_wakeup_max_time_ms.attr,
+ &dev_attr_wakeup_last_time_ms.attr,
#endif
#ifdef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_async.attr,
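The attributes added in sysfs.c above are exposed under /sys/devices/.../power/. A small userspace sketch that reads one of them follows (the platform device path is made up for illustration); an empty line means wakeup is not enabled for the device:

/* Userspace sketch; the platform device path below is hypothetical. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/platform/foo/power/wakeup_count", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("wakeup_count: %s", buf);
	fclose(f);
	return 0;
}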
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb594facfc3f..03751a01dbad 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -11,7 +11,10 @@
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/suspend.h>
-#include <linux/pm.h>
+
+#include "power.h"
+
+#define TIMEOUT 100
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
@@ -20,18 +23,244 @@
bool events_check_enabled;
/* The counter of registered wakeup events. */
-static unsigned long event_count;
+static atomic_t event_count = ATOMIC_INIT(0);
/* A preserved old value of event_count. */
-static unsigned long saved_event_count;
+static unsigned int saved_count;
/* The counter of wakeup events being processed. */
-static unsigned long events_in_progress;
+static atomic_t events_in_progress = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(events_lock);
static void pm_wakeup_timer_fn(unsigned long data);
-static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
-static unsigned long events_timer_expires;
+static LIST_HEAD(wakeup_sources);
+
+/**
+ * wakeup_source_create - Create a struct wakeup_source object.
+ * @name: Name of the new wakeup source.
+ */
+struct wakeup_source *wakeup_source_create(const char *name)
+{
+ struct wakeup_source *ws;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ return NULL;
+
+ spin_lock_init(&ws->lock);
+ if (name)
+ ws->name = kstrdup(name, GFP_KERNEL);
+
+ return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_create);
+
+/**
+ * wakeup_source_destroy - Destroy a struct wakeup_source object.
+ * @ws: Wakeup source to destroy.
+ */
+void wakeup_source_destroy(struct wakeup_source *ws)
+{
+ if (!ws)
+ return;
+
+ spin_lock_irq(&ws->lock);
+ while (ws->active) {
+ spin_unlock_irq(&ws->lock);
+
+ schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+
+ spin_lock_irq(&ws->lock);
+ }
+ spin_unlock_irq(&ws->lock);
+
+ kfree(ws->name);
+ kfree(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_destroy);
+
+/**
+ * wakeup_source_add - Add given object to the list of wakeup sources.
+ * @ws: Wakeup source object to add to the list.
+ */
+void wakeup_source_add(struct wakeup_source *ws)
+{
+ if (WARN_ON(!ws))
+ return;
+
+ setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+ ws->active = false;
+
+ spin_lock_irq(&events_lock);
+ list_add_rcu(&ws->entry, &wakeup_sources);
+ spin_unlock_irq(&events_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+/**
+ * wakeup_source_remove - Remove given object from the wakeup sources list.
+ * @ws: Wakeup source object to remove from the list.
+ */
+void wakeup_source_remove(struct wakeup_source *ws)
+{
+ if (WARN_ON(!ws))
+ return;
+
+ spin_lock_irq(&events_lock);
+ list_del_rcu(&ws->entry);
+ spin_unlock_irq(&events_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(wakeup_source_remove);
+
+/**
+ * wakeup_source_register - Create wakeup source and add it to the list.
+ * @name: Name of the wakeup source to register.
+ */
+struct wakeup_source *wakeup_source_register(const char *name)
+{
+ struct wakeup_source *ws;
+
+ ws = wakeup_source_create(name);
+ if (ws)
+ wakeup_source_add(ws);
+
+ return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_register);
+
+/**
+ * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
+ * @ws: Wakeup source object to unregister.
+ */
+void wakeup_source_unregister(struct wakeup_source *ws)
+{
+ wakeup_source_remove(ws);
+ wakeup_source_destroy(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_unregister);
+
+/**
+ * device_wakeup_attach - Attach a wakeup source object to a device object.
+ * @dev: Device to handle.
+ * @ws: Wakeup source object to attach to @dev.
+ *
+ * This causes @dev to be treated as a wakeup device.
+ */
+static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ spin_unlock_irq(&dev->power.lock);
+ return -EEXIST;
+ }
+ dev->power.wakeup = ws;
+ spin_unlock_irq(&dev->power.lock);
+ return 0;
+}
+
+/**
+ * device_wakeup_enable - Enable given device to be a wakeup source.
+ * @dev: Device to handle.
+ *
+ * Create a wakeup source object, register it and attach it to @dev.
+ */
+int device_wakeup_enable(struct device *dev)
+{
+ struct wakeup_source *ws;
+ int ret;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ ws = wakeup_source_register(dev_name(dev));
+ if (!ws)
+ return -ENOMEM;
+
+ ret = device_wakeup_attach(dev, ws);
+ if (ret)
+ wakeup_source_unregister(ws);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_enable);
+
+/**
+ * device_wakeup_detach - Detach a device's wakeup source object from it.
+ * @dev: Device to detach the wakeup source object from.
+ *
+ * After it returns, @dev will not be treated as a wakeup device any more.
+ */
+static struct wakeup_source *device_wakeup_detach(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ spin_lock_irq(&dev->power.lock);
+ ws = dev->power.wakeup;
+ dev->power.wakeup = NULL;
+ spin_unlock_irq(&dev->power.lock);
+ return ws;
+}
+
+/**
+ * device_wakeup_disable - Do not regard a device as a wakeup source any more.
+ * @dev: Device to handle.
+ *
+ * Detach the @dev's wakeup source object from it, unregister this wakeup source
+ * object and destroy it.
+ */
+int device_wakeup_disable(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ ws = device_wakeup_detach(dev);
+ if (ws)
+ wakeup_source_unregister(ws);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_disable);
+
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc.
+ */
+int device_init_wakeup(struct device *dev, bool enable)
+{
+ int ret = 0;
+
+ if (enable) {
+ device_set_wakeup_capable(dev, true);
+ ret = device_wakeup_enable(dev);
+ } else {
+ device_set_wakeup_capable(dev, false);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_init_wakeup);
+
+/**
+ * device_set_wakeup_enable - Enable or disable a device to wake up the system.
+ * @dev: Device to handle.
+ */
+int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
/*
* The functions below use the observation that each wakeup event starts a
@@ -55,118 +284,259 @@ static unsigned long events_timer_expires;
* knowledge, however, may not be available to it, so it can simply specify time
* to wait before the system can be suspended and pass it as the second
* argument of pm_wakeup_event().
+ *
+ * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
+ * "no suspend" period will be ended either by the pm_relax(), or by the timer
+ * function executed when the timer expires, whichever comes first.
*/
/**
+ * wakeup_source_activate - Mark given wakeup source as active.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and, if @ws has just been activated, notify the PM
+ * core of the event by incrementing the counter of wakeup events being
+ * processed.
+ */
+static void wakeup_source_activate(struct wakeup_source *ws)
+{
+ ws->active = true;
+ ws->active_count++;
+ ws->timer_expires = jiffies;
+ ws->last_time = ktime_get();
+
+ atomic_inc(&events_in_progress);
+}
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+ ws->event_count++;
+ if (!ws->active)
+ wakeup_source_activate(ws);
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
+
+/**
* pm_stay_awake - Notify the PM core that a wakeup event is being processed.
* @dev: Device the wakeup event is related to.
*
- * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
- * counter of wakeup events being processed. If @dev is not NULL, the counter
- * of wakeup events related to @dev is incremented too.
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake for the @dev's wakeup source object.
*
* Call this function after detecting of a wakeup event if pm_relax() is going
* to be called directly after processing the event (and possibly passing it to
* user space for further processing).
- *
- * It is safe to call this function from interrupt context.
*/
void pm_stay_awake(struct device *dev)
{
unsigned long flags;
- spin_lock_irqsave(&events_lock, flags);
- if (dev)
- dev->power.wakeup_count++;
+ if (!dev)
+ return;
- events_in_progress++;
- spin_unlock_irqrestore(&events_lock, flags);
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_stay_awake(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
+EXPORT_SYMBOL_GPL(pm_stay_awake);
/**
- * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
*
- * Notify the PM core that a wakeup event has been processed by decrementing
- * the counter of wakeup events being processed and incrementing the counter
- * of registered wakeup events.
+ * Update the @ws' statistics and notify the PM core that the wakeup source has
+ * become inactive by decrementing the counter of wakeup events being processed
+ * and incrementing the counter of registered wakeup events.
+ */
+static void wakeup_source_deactivate(struct wakeup_source *ws)
+{
+ ktime_t duration;
+ ktime_t now;
+
+ ws->relax_count++;
+ /*
+ * __pm_relax() may be called directly or from a timer function.
+ * If it is called directly right after the timer function has been
+ * started, but before the timer function calls __pm_relax(), it is
+ * possible that __pm_stay_awake() will be called in the meantime and
+ * will set ws->active. Then, ws->active may be cleared immediately
+ * by the __pm_relax() called from the timer function, but in such a
+ * case ws->relax_count will be different from ws->active_count.
+ */
+ if (ws->relax_count != ws->active_count) {
+ ws->relax_count--;
+ return;
+ }
+
+ ws->active = false;
+
+ now = ktime_get();
+ duration = ktime_sub(now, ws->last_time);
+ ws->total_time = ktime_add(ws->total_time, duration);
+ if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
+ ws->max_time = duration;
+
+ del_timer(&ws->timer);
+
+ /*
+ * event_count has to be incremented before events_in_progress is
+ * modified, so that the callers of pm_check_wakeup_events() and
+ * pm_save_wakeup_count() don't see the old value of event_count and
+ * events_in_progress equal to zero at the same time.
+ */
+ atomic_inc(&event_count);
+ smp_mb__before_atomic_dec();
+ atomic_dec(&events_in_progress);
+}
+
+/**
+ * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @ws: Wakeup source object associated with the source of the event.
*
* Call this function for wakeup events whose processing started with calling
- * pm_stay_awake().
+ * __pm_stay_awake().
*
* It is safe to call it from interrupt context.
*/
-void pm_relax(void)
+void __pm_relax(struct wakeup_source *ws)
{
unsigned long flags;
- spin_lock_irqsave(&events_lock, flags);
- if (events_in_progress) {
- events_in_progress--;
- event_count++;
- }
- spin_unlock_irqrestore(&events_lock, flags);
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+ if (ws->active)
+ wakeup_source_deactivate(ws);
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_relax);
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @dev: Device that signaled the event.
+ *
+ * Execute __pm_relax() for the @dev's wakeup source object.
+ */
+void pm_relax(struct device *dev)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_relax(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
+EXPORT_SYMBOL_GPL(pm_relax);
/**
* pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ * @data: Address of the wakeup source object associated with the event source.
*
- * Decrease the counter of wakeup events being processed after it was increased
- * by pm_wakeup_event().
+ * Call __pm_relax() for the wakeup source whose address is stored in @data.
*/
static void pm_wakeup_timer_fn(unsigned long data)
{
+ __pm_relax((struct wakeup_source *)data);
+}
+
+/**
+ * __pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the event source.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event whose source is @ws that will take
+ * approximately @msec milliseconds to be processed by the kernel. If @ws is
+ * not active, activate it. If @msec is nonzero, set up the @ws' timer to
+ * execute pm_wakeup_timer_fn() in future.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
unsigned long flags;
+ unsigned long expires;
- spin_lock_irqsave(&events_lock, flags);
- if (events_timer_expires
- && time_before_eq(events_timer_expires, jiffies)) {
- events_in_progress--;
- events_timer_expires = 0;
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ ws->event_count++;
+ if (!ws->active)
+ wakeup_source_activate(ws);
+
+ if (!msec) {
+ wakeup_source_deactivate(ws);
+ goto unlock;
}
- spin_unlock_irqrestore(&events_lock, flags);
+
+ expires = jiffies + msecs_to_jiffies(msec);
+ if (!expires)
+ expires = 1;
+
+ if (time_after(expires, ws->timer_expires)) {
+ mod_timer(&ws->timer, expires);
+ ws->timer_expires = expires;
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&ws->lock, flags);
}
+EXPORT_SYMBOL_GPL(__pm_wakeup_event);
+
/**
* pm_wakeup_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
*
- * Notify the PM core of a wakeup event (signaled by @dev) that will take
- * approximately @msec milliseconds to be processed by the kernel. Increment
- * the counter of registered wakeup events and (if @msec is nonzero) set up
- * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
- * timer has not been set up already, increment the counter of wakeup events
- * being processed). If @dev is not NULL, the counter of wakeup events related
- * to @dev is incremented too.
- *
- * It is safe to call this function from interrupt context.
+ * Call __pm_wakeup_event() for the @dev's wakeup source object.
*/
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
unsigned long flags;
- spin_lock_irqsave(&events_lock, flags);
- event_count++;
- if (dev)
- dev->power.wakeup_count++;
-
- if (msec) {
- unsigned long expires;
+ if (!dev)
+ return;
- expires = jiffies + msecs_to_jiffies(msec);
- if (!expires)
- expires = 1;
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_wakeup_event(dev->power.wakeup, msec);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_event);
- if (!events_timer_expires
- || time_after(expires, events_timer_expires)) {
- if (!events_timer_expires)
- events_in_progress++;
+/**
+ * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
+ */
+static void pm_wakeup_update_hit_counts(void)
+{
+ unsigned long flags;
+ struct wakeup_source *ws;
- mod_timer(&events_timer, expires);
- events_timer_expires = expires;
- }
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ spin_lock_irqsave(&ws->lock, flags);
+ if (ws->active)
+ ws->hit_count++;
+ spin_unlock_irqrestore(&ws->lock, flags);
}
- spin_unlock_irqrestore(&events_lock, flags);
+ rcu_read_unlock();
}
/**
@@ -184,10 +554,13 @@ bool pm_check_wakeup_events(void)
spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
- ret = (event_count == saved_event_count) && !events_in_progress;
+ ret = ((unsigned int)atomic_read(&event_count) == saved_count)
+ && !atomic_read(&events_in_progress);
events_check_enabled = ret;
}
spin_unlock_irqrestore(&events_lock, flags);
+ if (!ret)
+ pm_wakeup_update_hit_counts();
return ret;
}
@@ -202,24 +575,20 @@ bool pm_check_wakeup_events(void)
* drop down to zero has been interrupted by a signal (and the current number
* of wakeup events being processed is still nonzero). Otherwise return true.
*/
-bool pm_get_wakeup_count(unsigned long *count)
+bool pm_get_wakeup_count(unsigned int *count)
{
bool ret;
- spin_lock_irq(&events_lock);
if (capable(CAP_SYS_ADMIN))
events_check_enabled = false;
- while (events_in_progress && !signal_pending(current)) {
- spin_unlock_irq(&events_lock);
-
- schedule_timeout_interruptible(msecs_to_jiffies(100));
-
- spin_lock_irq(&events_lock);
+ while (atomic_read(&events_in_progress) && !signal_pending(current)) {
+ pm_wakeup_update_hit_counts();
+ schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
}
- *count = event_count;
- ret = !events_in_progress;
- spin_unlock_irq(&events_lock);
+
+ ret = !atomic_read(&events_in_progress);
+ *count = atomic_read(&event_count);
return ret;
}
@@ -232,16 +601,19 @@ bool pm_get_wakeup_count(unsigned long *count)
* old number of registered wakeup events to be used by pm_check_wakeup_events()
* and return true. Otherwise return false.
*/
-bool pm_save_wakeup_count(unsigned long count)
+bool pm_save_wakeup_count(unsigned int count)
{
bool ret = false;
spin_lock_irq(&events_lock);
- if (count == event_count && !events_in_progress) {
- saved_event_count = count;
+ if (count == (unsigned int)atomic_read(&event_count)
+ && !atomic_read(&events_in_progress)) {
+ saved_count = count;
events_check_enabled = true;
ret = true;
}
spin_unlock_irq(&events_lock);
+ if (!ret)
+ pm_wakeup_update_hit_counts();
return ret;
}
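The final hunks adjust pm_get_wakeup_count() and pm_save_wakeup_count(), which back the wakeup-count handshake userspace can use to avoid racing a suspend request against wakeup events (assuming the /sys/power/wakeup_count and /sys/power/state interfaces, which sit outside this diff). A rough sketch of that handshake:

/* Handshake sketch; error handling trimmed for brevity. */
#include <stdio.h>

int main(void)
{
	char count[32];
	FILE *f;

	/* Read the current count; the read may block while events are in progress. */
	f = fopen("/sys/power/wakeup_count", "r");
	if (!f || !fgets(count, sizeof(count), f))
		return 1;
	fclose(f);

	/* Write the same value back; the write fails if new wakeup events arrived. */
	f = fopen("/sys/power/wakeup_count", "w");
	if (!f)
		return 1;
	fputs(count, f);
	if (fclose(f) == EOF)
		return 1;	/* raced with a wakeup event: retry or abort */

	/* Only then request the sleep state. */
	f = fopen("/sys/power/state", "w");
	if (!f)
		return 1;
	fputs("mem\n", f);
	fclose(f);
	return 0;
}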