Diffstat (limited to 'drivers/base')
-rw-r--r--   drivers/base/dd.c           |  17
-rw-r--r--   drivers/base/platform.c     |   7
-rw-r--r--   drivers/base/power/main.c   |  66
-rw-r--r--   drivers/base/power/opp.c    | 122
-rw-r--r--   drivers/base/power/wakeup.c |   6
5 files changed, 106 insertions, 112 deletions
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 8986b9f22781..62ec61e8f84a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
 static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
 /**
  * deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
  * This function moves all devices from the pending list to the active
  * list and schedules the deferred probe workqueue to process them.  It
  * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe.  In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer.  If the
+ * second depends on the first, then it will get put on the pending list after
+ * the trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver.  If the trigger count
+ * changes in the midst of a probe, then deferred processing should be
+ * triggered again.
  */
 static void driver_deferred_probe_trigger(void)
 {
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
 	 * into the active list so they can be retried by the workqueue
 	 */
 	mutex_lock(&deferred_probe_mutex);
+	atomic_inc(&deferred_trigger_count);
 	list_splice_tail_init(&deferred_probe_pending_list,
 			      &deferred_probe_active_list);
 	mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
 static int really_probe(struct device *dev, struct device_driver *drv)
 {
 	int ret = 0;
+	int local_trigger_count = atomic_read(&deferred_trigger_count);
 
 	atomic_inc(&probe_count);
 	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
 		/* Driver requested deferred probing */
 		dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
 		driver_deferred_probe_add(dev);
+		/* Did a trigger occur while probing? Need to re-trigger if yes */
+		if (local_trigger_count != atomic_read(&deferred_trigger_count))
+			driver_deferred_probe_trigger();
 	} else if (ret != -ENODEV && ret != -ENXIO) {
 		/* driver matched but the probe failed */
 		printk(KERN_WARNING
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e714709704e4..5b47210889e0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
 		return -ENXIO;
 	return dev->archdata.irqs[num];
 #else
-	struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+	struct resource *r;
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+		return of_irq_get(dev->dev.of_node, num);
+
+	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
 	return r ? r->start : -ENXIO;
 #endif
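The two changes above are related: platform_get_irq() now resolves interrupts through of_irq_get(), which can return -EPROBE_DEFER while the interrupt parent is still unprobed, and the deferred_trigger_count logic ensures such a deferral is not lost when it races with another, successful probe. As an illustrative sketch only (the "foo" driver below is hypothetical and not part of this patch), a consumer driver simply propagates the error:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            /*
             * With this patch, a DT-based device may see -EPROBE_DEFER
             * here while its interrupt parent is unprobed; returning it
             * puts the device on the deferred probe list for a retry.
             */
            if (irq < 0)
                    return irq;

            /* ... request_irq(), ioremap resources, etc. ... */
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe = foo_probe,
            .driver = {
                    .name = "foo",
            },
    };
    module_platform_driver(foo_driver);
    MODULE_LICENSE("GPL");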
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 86d5e4fb5b98..343ffad59377 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -479,7 +479,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;
 
 	if (!dev->power.is_noirq_suspended)
@@ -605,7 +605,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;
 
 	if (!dev->power.is_late_suspended)
@@ -735,6 +735,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;
 
+	if (dev->power.direct_complete) {
+		/* Match the pm_runtime_disable() in __device_suspend(). */
+		pm_runtime_enable(dev);
+		goto Complete;
+	}
+
 	dpm_wait(dev->parent, async);
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
@@ -1007,7 +1013,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 		goto Complete;
 	}
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
 	dpm_wait_for_children(dev, async);
@@ -1146,7 +1152,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 		goto Complete;
 	}
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
 	dpm_wait_for_children(dev, async);
@@ -1332,6 +1338,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;
 
+	if (dev->power.direct_complete) {
+		if (pm_runtime_status_suspended(dev)) {
+			pm_runtime_disable(dev);
+			if (pm_runtime_suspended_if_enabled(dev))
+				goto Complete;
+
+			pm_runtime_enable(dev);
+		}
+		dev->power.direct_complete = false;
+	}
+
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
 
@@ -1382,10 +1399,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
  End:
 	if (!error) {
+		struct device *parent = dev->parent;
+
 		dev->power.is_suspended = true;
-		if (dev->power.wakeup_path
-		    && dev->parent && !dev->parent->power.ignore_children)
-			dev->parent->power.wakeup_path = true;
+		if (parent) {
+			spin_lock_irq(&parent->power.lock);
+
+			dev->parent->power.direct_complete = false;
+			if (dev->power.wakeup_path
+			    && !dev->parent->power.ignore_children)
+				dev->parent->power.wakeup_path = true;
+
+			spin_unlock_irq(&parent->power.lock);
+		}
 	}
 
 	device_unlock(dev);
@@ -1487,7 +1513,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
 {
 	int (*callback)(struct device *) = NULL;
 	char *info = NULL;
-	int error = 0;
+	int ret = 0;
 
 	if (dev->power.syscore)
 		return 0;
@@ -1523,17 +1549,27 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		callback = dev->driver->pm->prepare;
 	}
 
-	if (callback) {
-		error = callback(dev);
-		suspend_report_result(callback, error);
-	}
+	if (callback)
+		ret = callback(dev);
 
 	device_unlock(dev);
 
-	if (error)
+	if (ret < 0) {
+		suspend_report_result(callback, ret);
 		pm_runtime_put(dev);
-
-	return error;
+		return ret;
+	}
+	/*
+	 * A positive return value from ->prepare() means "this device appears
+	 * to be runtime-suspended and its state is fine, so if it really is
+	 * runtime-suspended, you can leave it in that state provided that you
+	 * will do the same thing with all of its descendants".  This only
+	 * applies to suspend transitions, however.
+	 */
+	spin_lock_irq(&dev->power.lock);
+	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+	spin_unlock_irq(&dev->power.lock);
+	return 0;
 }
 
 /**
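The direct_complete machinery is driven by the value a driver returns from its ->prepare() callback: a positive return marks the device as a candidate for skipping the whole suspend/resume cycle if it (and every descendant) is already runtime-suspended. A minimal sketch of how a driver might opt in, assuming its runtime-suspend state is also adequate for system suspend (the "bar" names are hypothetical, not part of this patch):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int bar_prepare(struct device *dev)
    {
            /*
             * Positive return: "if this device really is runtime-suspended
             * (and so are all of its descendants), leave it that way and
             * skip its suspend/resume callbacks for this transition".
             */
            return pm_runtime_suspended(dev);
    }

    static const struct dev_pm_ops bar_pm_ops = {
            .prepare = bar_prepare,
            /* regular suspend/resume callbacks remain as the fallback path */
    };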
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 25538675d59e..89ced955fafa 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -15,7 +15,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/list.h>
 #include <linux/rculist.h>
@@ -394,6 +393,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
+ *
+ * Return:
+ * 0:		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST:	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM:	Memory allocation failure
  */
 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
@@ -443,15 +449,31 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 	new_opp->u_volt = u_volt;
 	new_opp->available = true;
 
-	/* Insert new OPP in order of increasing frequency */
+	/*
+	 * Insert new OPP in order of increasing frequency
+	 * and discard if already present
+	 */
 	head = &dev_opp->opp_list;
 	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
-		if (new_opp->rate < opp->rate)
+		if (new_opp->rate <= opp->rate)
 			break;
 		else
 			head = &opp->node;
 	}
 
+	/* Duplicate OPPs ? */
+	if (new_opp->rate == opp->rate) {
+		int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+			0 : -EEXIST;
+
+		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+			 __func__, opp->rate, opp->u_volt, opp->available,
+			 new_opp->rate, new_opp->u_volt, new_opp->available);
+		mutex_unlock(&dev_opp_list_lock);
+		kfree(new_opp);
+		return ret;
+	}
+
 	list_add_rcu(&new_opp->node, head);
 	mutex_unlock(&dev_opp_list_lock);
@@ -596,96 +618,6 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
 
-#ifdef CONFIG_CPU_FREQ
-/**
- * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
- * @dev:	device for which we do this operation
- * @table:	Cpufreq table returned back to caller
- *
- * Generate a cpufreq table for a provided device- this assumes that the
- * opp list is already initialized and ready for usage.
- *
- * This function allocates required memory for the cpufreq table. It is
- * expected that the caller does the required maintenance such as freeing
- * the table as required.
- *
- * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
- * if no memory available for the operation (table is not populated), returns 0
- * if successful and table is populated.
- *
- * WARNING: It is important for the callers to ensure refreshing their copy of
- * the table if any of the mentioned functions have been invoked in the interim.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * To simplify the logic, we pretend we are updater and hold relevant mutex here
- * Callers should ensure that this function is *NOT* called under RCU protection
- * or in contexts where mutex locking cannot be used.
- */
-int dev_pm_opp_init_cpufreq_table(struct device *dev,
-				  struct cpufreq_frequency_table **table)
-{
-	struct device_opp *dev_opp;
-	struct dev_pm_opp *opp;
-	struct cpufreq_frequency_table *freq_table;
-	int i = 0;
-
-	/* Pretend as if I am an updater */
-	mutex_lock(&dev_opp_list_lock);
-
-	dev_opp = find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		int r = PTR_ERR(dev_opp);
-		mutex_unlock(&dev_opp_list_lock);
-		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
-		return r;
-	}
-
-	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
-			     (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
-	if (!freq_table) {
-		mutex_unlock(&dev_opp_list_lock);
-		dev_warn(dev, "%s: Unable to allocate frequency table\n",
-			 __func__);
-		return -ENOMEM;
-	}
-
-	list_for_each_entry(opp, &dev_opp->opp_list, node) {
-		if (opp->available) {
-			freq_table[i].driver_data = i;
-			freq_table[i].frequency = opp->rate / 1000;
-			i++;
-		}
-	}
-	mutex_unlock(&dev_opp_list_lock);
-
-	freq_table[i].driver_data = i;
-	freq_table[i].frequency = CPUFREQ_TABLE_END;
-
-	*table = &freq_table[0];
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
-
-/**
- * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
- * @dev:	device for which we do this operation
- * @table:	table to free
- *
- * Free up the table allocated by dev_pm_opp_init_cpufreq_table
- */
-void dev_pm_opp_free_cpufreq_table(struct device *dev,
-				   struct cpufreq_frequency_table **table)
-{
-	if (!table)
-		return;
-
-	kfree(*table);
-	*table = NULL;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
-#endif		/* CONFIG_CPU_FREQ */
-
 /**
  * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  * @dev: device pointer used to lookup device OPPs.
@@ -734,11 +666,9 @@ int of_init_opp_table(struct device *dev)
 		unsigned long freq = be32_to_cpup(val++) * 1000;
 		unsigned long volt = be32_to_cpup(val++);
 
-		if (dev_pm_opp_add(dev, freq, volt)) {
+		if (dev_pm_opp_add(dev, freq, volt))
 			dev_warn(dev, "%s: Failed to add OPP %ld\n",
 				 __func__, freq);
-			continue;
-		}
 		nr -= 2;
 	}
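With the return values now documented, callers of dev_pm_opp_add() can tell a benign duplicate (0) from a conflicting definition (-EEXIST). A hedged sketch of platform code registering OPPs (the "baz" function and the frequency/voltage values are made up for illustration):

    #include <linux/device.h>
    #include <linux/pm_opp.h>

    static int baz_register_opps(struct device *dev)
    {
            /* 1.0 GHz at 1.10 V; example values only */
            int ret = dev_pm_opp_add(dev, 1000000000, 1100000);

            if (ret == -EEXIST)
                    dev_warn(dev, "conflicting OPP already registered\n");
            else if (ret)
                    return ret;     /* e.g. -ENOMEM */

            /* Re-adding an identical, available OPP warns but returns 0. */
            return dev_pm_opp_add(dev, 1000000000, 1100000);
    }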
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2d56f4113ae7..eb1bd2ecad8b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable)
 {
 	int ret = 0;
 
+	if (!dev)
+		return -EINVAL;
+
 	if (enable) {
 		device_set_wakeup_capable(dev, true);
 		ret = device_wakeup_enable(dev);
 	} else {
+		if (dev->power.can_wakeup)
+			device_wakeup_disable(dev);
+
 		device_set_wakeup_capable(dev, false);
 	}
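For callers, the practical effect is that device_init_wakeup(dev, false) now releases the wakeup source that the enable path allocated, and a NULL device is rejected instead of dereferenced. A usage sketch (the "qux" driver is hypothetical):

    #include <linux/platform_device.h>
    #include <linux/pm_wakeup.h>

    static int qux_probe(struct platform_device *pdev)
    {
            /* Mark the device wakeup-capable and arm its wakeup source. */
            return device_init_wakeup(&pdev->dev, true);
    }

    static int qux_remove(struct platform_device *pdev)
    {
            /*
             * Disabling now also drops the wakeup source created by the
             * enable path (device_wakeup_disable()) before clearing the
             * capability flag, rather than leaking it.
             */
            return device_init_wakeup(&pdev->dev, false);
    }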