Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig               |   1
-rw-r--r--  drivers/base/bus.c                 |   1
-rw-r--r--  drivers/base/core.c                |  71
-rw-r--r--  drivers/base/dd.c                  |  20
-rw-r--r--  drivers/base/dma-buf.c             |   1
-rw-r--r--  drivers/base/dma-coherent.c        |   1
-rw-r--r--  drivers/base/dma-mapping.c         |  49
-rw-r--r--  drivers/base/driver.c              |   6
-rw-r--r--  drivers/base/firmware_class.c      |   6
-rw-r--r--  drivers/base/power/domain.c        | 342
-rw-r--r--  drivers/base/power/main.c          |  26
-rw-r--r--  drivers/base/power/qos.c           |   2
-rw-r--r--  drivers/base/power/sysfs.c         |   4
-rw-r--r--  drivers/base/regmap/internal.h     |  17
-rw-r--r--  drivers/base/regmap/regmap-irq.c   |  57
-rw-r--r--  drivers/base/regmap/regmap-mmio.c  |  30
-rw-r--r--  drivers/base/regmap/regmap.c       | 344
17 files changed, 821 insertions, 157 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9b21469482ae..08b4c5209384 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -196,6 +196,7 @@ config CMA
 	bool "Contiguous Memory Allocator (EXPERIMENTAL)"
 	depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
 	select MIGRATION
+	select MEMORY_ISOLATION
 	help
 	  This enables the Contiguous Memory Allocator which allows drivers
 	  to allocate big physically-contiguous blocks of memory for use with

diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 2bcef657a60c..181ed2660b33 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -743,7 +743,6 @@ int bus_add_driver(struct device_driver *drv)
 		}
 	}
 
-	kobject_uevent(&priv->kobj, KOBJ_ADD);
 	return 0;
 
 out_unregister:

diff --git a/drivers/base/core.c b/drivers/base/core.c
index 346be8b78b24..f338037a4f3d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -85,14 +85,13 @@ const char *dev_driver_string(const struct device *dev)
 }
 EXPORT_SYMBOL(dev_driver_string);
 
-#define to_dev(obj) container_of(obj, struct device, kobj)
 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
 
 static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
 			     char *buf)
 {
 	struct device_attribute *dev_attr = to_dev_attr(attr);
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	ssize_t ret = -EIO;
 
 	if (dev_attr->show)
@@ -108,7 +107,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
 			      const char *buf, size_t count)
 {
 	struct device_attribute *dev_attr = to_dev_attr(attr);
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	ssize_t ret = -EIO;
 
 	if (dev_attr->store)
@@ -182,7 +181,7 @@ EXPORT_SYMBOL_GPL(device_show_int);
  */
 static void device_release(struct kobject *kobj)
 {
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct device_private *p = dev->p;
 
 	if (dev->release)
@@ -200,7 +199,7 @@ static void device_release(struct kobject *kobj)
 
 static const void *device_namespace(struct kobject *kobj)
 {
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	const void *ns = NULL;
 
 	if (dev->class && dev->class->ns_type)
@@ -221,7 +220,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
 	struct kobj_type *ktype = get_ktype(kobj);
 
 	if (ktype == &device_ktype) {
-		struct device *dev = to_dev(kobj);
+		struct device *dev = kobj_to_dev(kobj);
 		if (dev->bus)
 			return 1;
 		if (dev->class)
@@ -232,7 +231,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
 
 static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
 {
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 
 	if (dev->bus)
 		return dev->bus->name;
@@ -244,7 +243,7 @@ static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
 static int dev_uevent(struct kset *kset, struct kobject *kobj,
 		      struct kobj_uevent_env *env)
 {
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	int retval = 0;
 
 	/* add device node properties if present */
@@ -1132,7 +1131,7 @@ int device_register(struct device *dev)
  */
 struct device *get_device(struct device *dev)
 {
-	return dev ? to_dev(kobject_get(&dev->kobj)) : NULL;
+	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
 }
 
 /**
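The hunks above replace the file-local to_dev() macro with the shared kobj_to_dev() helper from include/linux/device.h. A minimal sketch of what that helper amounts to (the function name here is illustrative; the real one is kobj_to_dev):

#include <linux/device.h>
#include <linux/kobject.h>

static inline struct device *example_kobj_to_dev(struct kobject *kobj)
{
	/* container_of() walks back from the embedded kobject to the
	 * enclosing struct device, exactly like the removed macro. */
	return container_of(kobj, struct device, kobj);
}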
@@ -1754,25 +1753,25 @@ int device_move(struct device *dev, struct device *new_parent,
 		set_dev_node(dev, dev_to_node(new_parent));
 	}
 
-	if (!dev->class)
-		goto out_put;
-	error = device_move_class_links(dev, old_parent, new_parent);
-	if (error) {
-		/* We ignore errors on cleanup since we're hosed anyway... */
-		device_move_class_links(dev, new_parent, old_parent);
-		if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
-			if (new_parent)
-				klist_remove(&dev->p->knode_parent);
-			dev->parent = old_parent;
-			if (old_parent) {
-				klist_add_tail(&dev->p->knode_parent,
-					       &old_parent->p->klist_children);
-				set_dev_node(dev, dev_to_node(old_parent));
+	if (dev->class) {
+		error = device_move_class_links(dev, old_parent, new_parent);
+		if (error) {
+			/* We ignore errors on cleanup since we're hosed anyway... */
+			device_move_class_links(dev, new_parent, old_parent);
+			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
+				if (new_parent)
+					klist_remove(&dev->p->knode_parent);
+				dev->parent = old_parent;
+				if (old_parent) {
+					klist_add_tail(&dev->p->knode_parent,
+						       &old_parent->p->klist_children);
+					set_dev_node(dev, dev_to_node(old_parent));
+				}
 			}
+			cleanup_glue_dir(dev, new_parent_kobj);
+			put_device(new_parent);
+			goto out;
 		}
-		cleanup_glue_dir(dev, new_parent_kobj);
-		put_device(new_parent);
-		goto out;
 	}
 	switch (dpm_order) {
 	case DPM_ORDER_NONE:
@@ -1787,7 +1786,7 @@ int device_move(struct device *dev, struct device *new_parent,
 		device_pm_move_last(dev);
 		break;
 	}
-out_put:
+
 	put_device(old_parent);
 out:
 	device_pm_unlock();
@@ -1812,6 +1811,13 @@ void device_shutdown(void)
 	while (!list_empty(&devices_kset->list)) {
 		dev = list_entry(devices_kset->list.prev, struct device,
 				kobj.entry);
+
+		/*
+		 * hold reference count of device's parent to
+		 * prevent it from being freed because parent's
+		 * lock is to be held
+		 */
+		get_device(dev->parent);
 		get_device(dev);
 		/*
 		 * Make sure the device is off the kset list, in the
@@ -1820,6 +1826,11 @@ void device_shutdown(void)
 		list_del_init(&dev->kobj.entry);
 		spin_unlock(&devices_kset->list_lock);
 
+		/* hold lock to avoid race with probe/release */
+		if (dev->parent)
+			device_lock(dev->parent);
+		device_lock(dev);
+
 		/* Don't allow any more runtime suspends */
 		pm_runtime_get_noresume(dev);
 		pm_runtime_barrier(dev);
@@ -1831,7 +1842,13 @@ void device_shutdown(void)
 			dev_dbg(dev, "shutdown\n");
 			dev->driver->shutdown(dev);
 		}
+
+		device_unlock(dev);
+		if (dev->parent)
+			device_unlock(dev->parent);
+
 		put_device(dev);
+		put_device(dev->parent);
 
 		spin_lock(&devices_kset->list_lock);
 	}

diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index dcb8a6e48692..e3bbed8a617c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -85,8 +85,20 @@ static void deferred_probe_work_func(struct work_struct *work)
 		 * manipulate the deferred list
 		 */
 		mutex_unlock(&deferred_probe_mutex);
+
+		/*
+		 * Force the device to the end of the dpm_list since
+		 * the PM code assumes that the order we add things to
+		 * the list is a good order for suspend but deferred
+		 * probe makes that very unsafe.
+		 */
+		device_pm_lock();
+		device_pm_move_last(dev);
+		device_pm_unlock();
+
 		dev_dbg(dev, "Retrying from deferred list\n");
 		bus_probe_device(dev);
+
 		mutex_lock(&deferred_probe_mutex);
 
 		put_device(dev);
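For context on how a device lands on the deferred list handled above: a probe routine returns -EPROBE_DEFER when a resource is not yet available. A hypothetical driver sketch (foo_probe and the clock lookup are illustrative, not from this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retried by the work func above */
		return PTR_ERR(clk);
	}
	return 0;
}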
@@ -283,6 +295,7 @@ probe_failed:
 	devres_release_all(dev);
 	driver_sysfs_remove(dev);
 	dev->driver = NULL;
+	dev_set_drvdata(dev, NULL);
 
 	if (ret == -EPROBE_DEFER) {
 		/* Driver requested deferred probing */
@@ -356,10 +369,9 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
 		 drv->bus->name, __func__, dev_name(dev), drv->name);
 
-	pm_runtime_get_noresume(dev);
 	pm_runtime_barrier(dev);
 	ret = really_probe(dev, drv);
-	pm_runtime_put_sync(dev);
+	pm_runtime_idle(dev);
 
 	return ret;
 }
@@ -406,9 +418,8 @@ int device_attach(struct device *dev)
 			ret = 0;
 		}
 	} else {
-		pm_runtime_get_noresume(dev);
 		ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
-		pm_runtime_put_sync(dev);
+		pm_runtime_idle(dev);
 	}
 out_unlock:
 	device_unlock(dev);
@@ -487,6 +498,7 @@ static void __device_release_driver(struct device *dev)
 			drv->remove(dev);
 		devres_release_all(dev);
 		dev->driver = NULL;
+		dev_set_drvdata(dev, NULL);
 		klist_remove(&dev->p->knode_driver);
 		if (dev->bus)
 			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,

diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 24e88fe29ec1..c30f3e1d0efc 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -493,6 +493,7 @@ EXPORT_SYMBOL_GPL(dma_buf_vmap);
 /**
  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
  * @dmabuf:	[in]	buffer to vunmap
+ * @vaddr:	[in]	vmap to vunmap
  */
 void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 {

diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1b85949e3d2f..560a7173f810 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -186,6 +186,7 @@ EXPORT_SYMBOL(dma_release_from_coherent);
  * @vma:	vm_area for the userspace memory
  * @vaddr:	cpu address returned by dma_alloc_from_coherent
  * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
+ * @ret:	result from remap_pfn_range()
  *
  * This checks whether the memory was allocated from the per-device
  * coherent memory pool and if so, maps that memory to the provided vma.

diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 6f3676f1559f..3fbedc75e7c5 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -10,6 +10,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <asm-generic/dma-coherent.h>
 
 /*
  * Managed DMA API
@@ -217,4 +218,52 @@ void dmam_release_declared_memory(struct device *dev)
 }
 EXPORT_SYMBOL(dmam_release_declared_memory);
 
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+			   void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
+EXPORT_SYMBOL(dma_common_get_sgtable);
+
 #endif
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+	unsigned long off = vma->vm_pgoff;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+#endif	/* CONFIG_MMU */
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
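One way the new generic helper could be consumed: an architecture or bus layer routing its mmap path straight through to dma_common_mmap(). This wiring is a sketch under that assumption, not taken from the patch:

#include <linux/dma-mapping.h>

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

/* Hypothetical arch hook: delegate to the generic implementation, which
 * first tries the per-device coherent pool, then remap_pfn_range(). */
static int foo_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}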
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 207c27ddf828..974e301a1ef0 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -185,8 +185,12 @@ int driver_register(struct device_driver *drv)
 	if (ret)
 		return ret;
 	ret = driver_add_groups(drv, drv->groups);
-	if (ret)
+	if (ret) {
 		bus_remove_driver(drv);
+		return ret;
+	}
+	kobject_uevent(&drv->p->kobj, KOBJ_ADD);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(driver_register);

diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 5401814c874d..803cfc1597a9 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -22,8 +22,6 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 
-#define to_dev(obj) container_of(obj, struct device, kobj)
-
 MODULE_AUTHOR("Manuel Estrada Sainz");
 MODULE_DESCRIPTION("Multi purpose firmware loading support");
 MODULE_LICENSE("GPL");
@@ -290,7 +288,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *bin_attr,
 				  char *buffer, loff_t offset, size_t count)
 {
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
 	struct firmware *fw;
 	ssize_t ret_count;
@@ -384,7 +382,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
 				   struct bin_attribute *bin_attr,
 				   char *buffer, loff_t offset, size_t count)
 {
-	struct device *dev = to_dev(kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
 	struct firmware *fw;
 	ssize_t retval;

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 83aa694a8efe..ba3487c9835b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -75,19 +75,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
 			  start_latency_ns, "start");
 }
 
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
-	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
-					save_state_latency_ns, "state save");
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
-	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
-					restore_state_latency_ns,
-					"state restore");
-}
-
 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
 	bool ret = false;
@@ -139,6 +126,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_ACTIVE;
 }
 
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+	s64 usecs64;
+
+	if (!genpd->cpu_data)
+		return;
+
+	usecs64 = genpd->power_on_latency_ns;
+	do_div(usecs64, NSEC_PER_USEC);
+	usecs64 += genpd->cpu_data->saved_exit_latency;
+	genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
 /**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
@@ -146,7 +146,7 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
 	struct gpd_link *link;
@@ -176,6 +176,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		return 0;
 	}
 
+	if (genpd->cpu_data) {
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = true;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	/*
 	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +222,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		if (elapsed_ns > genpd->power_on_latency_ns) {
 			genpd->power_on_latency_ns = elapsed_ns;
 			genpd->max_off_time_changed = true;
+			genpd_recalc_cpu_exit_latency(genpd);
 			if (genpd->name)
 				pr_warning("%s: Power-on latency exceeded, "
 					"new value %lld ns\n", genpd->name,
@@ -222,6 +230,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		}
 	}
 
+ out:
 	genpd_set_active(genpd);
 
 	return 0;
@@ -251,6 +260,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+					save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+					restore_state_latency_ns,
+					"state restore");
+}
+
 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 				     unsigned long val, void *ptr)
 {
@@ -275,7 +297,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 		pdd = dev->power.subsys_data ?
 				dev->power.subsys_data->domain_data : NULL;
-		if (pdd) {
+		if (pdd && pdd->dev) {
 			to_gpd_data(pdd)->td.constraint_changed = true;
 			genpd = dev_to_genpd(dev);
 		} else {
@@ -339,19 +361,16 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
+	bool need_restore = gpd_data->need_restore;
 
-	if (!gpd_data->need_restore)
-		return;
-
+	gpd_data->need_restore = false;
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
-	genpd_restore_dev(genpd, dev);
-	genpd_stop_dev(genpd, dev);
+	if (need_restore)
+		genpd_restore_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
-
-	gpd_data->need_restore = false;
 }
@@ -458,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 		}
 	}
 
+	if (genpd->cpu_data) {
+		/*
+		 * If cpu_data is set, cpuidle should turn the domain off when
+		 * the CPU in it is idle.  In that case we don't decrement the
+		 * subdomain counts of the master domains, so that power is not
+		 * removed from the current domain prematurely as a result of
+		 * cutting off the masters' power.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = false;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	if (genpd->power_off) {
 		ktime_t time_start;
 		s64 elapsed_ns;
@@ -595,7 +629,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
 	/* If power.irq_safe, the PM domain is never powered off. */
 	if (dev->power.irq_safe)
-		goto out;
+		return genpd_start_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 	ret = __pm_genpd_poweron(genpd);
@@ -628,9 +662,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
- out:
-	genpd_start_dev(genpd, dev);
-
 	return 0;
 }
 
@@ -1235,6 +1266,27 @@ static void pm_genpd_complete(struct device *dev)
 
 #endif /* CONFIG_PM_SLEEP */
 
+static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
+{
+	struct generic_pm_domain_data *gpd_data;
+
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data)
+		return NULL;
+
+	mutex_init(&gpd_data->lock);
+	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+	return gpd_data;
+}
+
+static void __pm_genpd_free_dev_data(struct device *dev,
+				     struct generic_pm_domain_data *gpd_data)
+{
+	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+	kfree(gpd_data);
+}
+
 /**
  * __pm_genpd_add_device - Add a device to an I/O PM domain.
  * @genpd: PM domain to add the device to.
@@ -1244,7 +1296,7 @@ static void pm_genpd_complete(struct device *dev)
 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 			  struct gpd_timing_data *td)
 {
-	struct generic_pm_domain_data *gpd_data;
+	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
 	struct pm_domain_data *pdd;
 	int ret = 0;
 
@@ -1253,14 +1305,10 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
 		return -EINVAL;
 
-	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
-	if (!gpd_data)
+	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+	if (!gpd_data_new)
 		return -ENOMEM;
 
-	mutex_init(&gpd_data->lock);
-	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
-	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
 	genpd_acquire_lock(genpd);
 
 	if (genpd->prepared_count > 0) {
@@ -1274,35 +1322,42 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 		goto out;
 	}
 
+	ret = dev_pm_get_subsys_data(dev);
+	if (ret)
+		goto out;
+
 	genpd->device_count++;
 	genpd->max_off_time_changed = true;
 
-	dev_pm_get_subsys_data(dev);
-
-	mutex_lock(&gpd_data->lock);
 	spin_lock_irq(&dev->power.lock);
+
 	dev->pm_domain = &genpd->domain;
-	dev->power.subsys_data->domain_data = &gpd_data->base;
-	gpd_data->base.dev = dev;
-	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+	if (dev->power.subsys_data->domain_data) {
+		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+	} else {
+		gpd_data = gpd_data_new;
+		dev->power.subsys_data->domain_data = &gpd_data->base;
+	}
+	gpd_data->refcount++;
 	if (td)
 		gpd_data->td = *td;
+
+	spin_unlock_irq(&dev->power.lock);
+
+	mutex_lock(&gpd_data->lock);
+	gpd_data->base.dev = dev;
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
 	gpd_data->td.constraint_changed = true;
 	gpd_data->td.effective_constraint_ns = -1;
-	spin_unlock_irq(&dev->power.lock);
 	mutex_unlock(&gpd_data->lock);
 
-	genpd_release_lock(genpd);
-
-	return 0;
-
  out:
 	genpd_release_lock(genpd);
 
-	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
-	kfree(gpd_data);
+	if (gpd_data != gpd_data_new)
+		__pm_genpd_free_dev_data(dev, gpd_data_new);
+
 	return ret;
 }
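Typical platform code pairing these interfaces, as a sketch (the domain and the helper names are hypothetical; passing NULL timing data lets __pm_genpd_add_device() allocate and refcount the per-device data as in the hunks above):

#include <linux/pm_domain.h>

static struct generic_pm_domain foo_domain;	/* assumed initialised elsewhere */

static int foo_attach_dev(struct device *dev)
{
	return __pm_genpd_add_device(&foo_domain, dev, NULL);
}

static void foo_detach_dev(struct device *dev)
{
	pm_genpd_remove_device(&foo_domain, dev);
}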
@@ -1348,6 +1403,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
 	struct generic_pm_domain_data *gpd_data;
 	struct pm_domain_data *pdd;
+	bool remove = false;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1368,22 +1424,28 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	genpd->max_off_time_changed = true;
 
 	spin_lock_irq(&dev->power.lock);
+
 	dev->pm_domain = NULL;
 	pdd = dev->power.subsys_data->domain_data;
 	list_del_init(&pdd->list_node);
-	dev->power.subsys_data->domain_data = NULL;
+	gpd_data = to_gpd_data(pdd);
+	if (--gpd_data->refcount == 0) {
+		dev->power.subsys_data->domain_data = NULL;
+		remove = true;
+	}
+
 	spin_unlock_irq(&dev->power.lock);
 
-	gpd_data = to_gpd_data(pdd);
 	mutex_lock(&gpd_data->lock);
 	pdd->dev = NULL;
 	mutex_unlock(&gpd_data->lock);
 
 	genpd_release_lock(genpd);
 
-	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
-	kfree(gpd_data);
 	dev_pm_put_subsys_data(dev);
+	if (remove)
+		__pm_genpd_free_dev_data(dev, gpd_data);
+
 	return 0;
 
  out:
@@ -1541,33 +1603,52 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
  * @dev: Device to add the callbacks to.
  * @ops: Set of callbacks to add.
  * @td: Timing data to add to the device along with the callbacks (optional).
+ *
+ * Every call to this routine should be balanced with a call to
+ * __pm_genpd_remove_callbacks() and they must not be nested.
  */
 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
 			   struct gpd_timing_data *td)
 {
-	struct pm_domain_data *pdd;
+	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
 	int ret = 0;
 
-	if (!(dev && dev->power.subsys_data && ops))
+	if (!(dev && ops))
 		return -EINVAL;
 
+	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+	if (!gpd_data_new)
+		return -ENOMEM;
+
 	pm_runtime_disable(dev);
 	device_pm_lock();
 
-	pdd = dev->power.subsys_data->domain_data;
-	if (pdd) {
-		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	ret = dev_pm_get_subsys_data(dev);
+	if (ret)
+		goto out;
 
-		gpd_data->ops = *ops;
-		if (td)
-			gpd_data->td = *td;
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data->domain_data) {
+		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
 	} else {
-		ret = -EINVAL;
+		gpd_data = gpd_data_new;
+		dev->power.subsys_data->domain_data = &gpd_data->base;
 	}
+	gpd_data->refcount++;
+	gpd_data->ops = *ops;
+	if (td)
+		gpd_data->td = *td;
+
+	spin_unlock_irq(&dev->power.lock);
 
+ out:
 	device_pm_unlock();
 	pm_runtime_enable(dev);
 
+	if (gpd_data != gpd_data_new)
+		__pm_genpd_free_dev_data(dev, gpd_data_new);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
@@ -1576,10 +1657,13 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
  * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
  * @dev: Device to remove the callbacks from.
  * @clear_td: If set, clear the device's timing data too.
+ *
+ * This routine can only be called after pm_genpd_add_callbacks().
  */
 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 {
-	struct pm_domain_data *pdd;
+	struct generic_pm_domain_data *gpd_data = NULL;
+	bool remove = false;
 	int ret = 0;
 
 	if (!(dev && dev->power.subsys_data))
@@ -1588,24 +1672,118 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 	pm_runtime_disable(dev);
 	device_pm_lock();
 
-	pdd = dev->power.subsys_data->domain_data;
-	if (pdd) {
-		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	spin_lock_irq(&dev->power.lock);
 
-		gpd_data->ops = (struct gpd_dev_ops){ 0 };
+	if (dev->power.subsys_data->domain_data) {
+		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+		gpd_data->ops = (struct gpd_dev_ops){ NULL };
 		if (clear_td)
 			gpd_data->td = (struct gpd_timing_data){ 0 };
+
+		if (--gpd_data->refcount == 0) {
+			dev->power.subsys_data->domain_data = NULL;
+			remove = true;
+		}
 	} else {
 		ret = -EINVAL;
 	}
 
+	spin_unlock_irq(&dev->power.lock);
+
 	device_pm_unlock();
 	pm_runtime_enable(dev);
 
-	return ret;
+	if (ret)
+		return ret;
+
+	dev_pm_put_subsys_data(dev);
+	if (remove)
+		__pm_genpd_free_dev_data(dev, gpd_data);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+	struct cpuidle_driver *cpuidle_drv;
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd) || state < 0)
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->cpu_data) {
+		ret = -EEXIST;
+		goto out;
+	}
+	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+	if (!cpu_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	cpuidle_drv = cpuidle_driver_ref();
+	if (!cpuidle_drv) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cpuidle_drv->state_count <= state) {
+		ret = -EINVAL;
+		goto err;
+	}
+	idle_state = &cpuidle_drv->states[state];
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto err;
+	}
+	cpu_data->idle_state = idle_state;
+	cpu_data->saved_exit_latency = idle_state->exit_latency;
+	genpd->cpu_data = cpu_data;
+	genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+
+ err:
+	cpuidle_driver_unref();
+	goto out;
+}
+
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	cpu_data = genpd->cpu_data;
+	if (!cpu_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	idle_state = cpu_data->idle_state;
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	idle_state->exit_latency = cpu_data->saved_exit_latency;
+	cpuidle_driver_unref();
+	genpd->cpu_data = NULL;
+	kfree(cpu_data);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
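How SoC setup code might use the new attach/detach pair, as a sketch (the extern declarations and the domain are assumptions for illustration; the patch itself does not show callers):

#include <linux/pm_domain.h>

extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);

static struct generic_pm_domain cpu_domain;	/* assumed initialised */

static int __init foo_pm_init(void)
{
	/* After this, cpuidle state 1 is only enabled while the domain
	 * is powered off, and its exit latency is padded with the
	 * domain's measured power-on latency. */
	return genpd_attach_cpuidle(&cpu_domain, 1);
}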
@@ -1615,16 +1793,24 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 static int pm_genpd_default_save_state(struct device *dev)
 {
 	int (*cb)(struct device *__dev);
-	struct device_driver *drv = dev->driver;
 
 	cb = dev_gpd_data(dev)->ops.save_state;
 	if (cb)
 		return cb(dev);
 
-	if (drv && drv->pm && drv->pm->runtime_suspend)
-		return drv->pm->runtime_suspend(dev);
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_suspend;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_suspend;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_suspend;
+	else
+		cb = NULL;
 
-	return 0;
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_suspend;
+
+	return cb ? cb(dev) : 0;
 }
 
 /**
@@ -1634,16 +1820,24 @@ static int pm_genpd_default_save_state(struct device *dev)
 static int pm_genpd_default_restore_state(struct device *dev)
 {
 	int (*cb)(struct device *__dev);
-	struct device_driver *drv = dev->driver;
 
 	cb = dev_gpd_data(dev)->ops.restore_state;
 	if (cb)
 		return cb(dev);
 
-	if (drv && drv->pm && drv->pm->runtime_resume)
-		return drv->pm->runtime_resume(dev);
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_resume;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_resume;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_resume;
+	else
+		cb = NULL;
 
-	return 0;
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_resume;
+
+	return cb ? cb(dev) : 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
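With the reworked defaults above, genpd now looks for runtime PM callbacks on the device type, class and bus before falling back to the driver. A minimal driver sketch (names are illustrative) whose callbacks the defaults would pick up via dev->driver->pm:

#include <linux/pm.h>
#include <linux/platform_device.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,	/* found last in the lookup order */
	},
};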
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9cb845e49334..0113adc310dc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,7 +28,7 @@
 #include <linux/sched.h>
 #include <linux/async.h>
 #include <linux/suspend.h>
-
+#include <linux/cpuidle.h>
 #include "../base.h"
 #include "power.h"
 
@@ -45,10 +45,10 @@ typedef int (*pm_callback_t)(struct device *);
  */
 LIST_HEAD(dpm_list);
-LIST_HEAD(dpm_prepared_list);
-LIST_HEAD(dpm_suspended_list);
-LIST_HEAD(dpm_late_early_list);
-LIST_HEAD(dpm_noirq_list);
+static LIST_HEAD(dpm_prepared_list);
+static LIST_HEAD(dpm_suspended_list);
+static LIST_HEAD(dpm_late_early_list);
+static LIST_HEAD(dpm_noirq_list);
 
 struct suspend_stats suspend_stats;
 static DEFINE_MUTEX(dpm_list_mtx);
@@ -166,7 +166,7 @@ static ktime_t initcall_debug_start(struct device *dev)
 {
 	ktime_t calltime = ktime_set(0, 0);
 
-	if (initcall_debug) {
+	if (pm_print_times_enabled) {
 		pr_info("calling  %s+ @ %i, parent: %s\n",
 			dev_name(dev), task_pid_nr(current),
 			dev->parent ? dev_name(dev->parent) : "none");
@@ -181,7 +181,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
 {
 	ktime_t delta, rettime;
 
-	if (initcall_debug) {
+	if (pm_print_times_enabled) {
 		rettime = ktime_get();
 		delta = ktime_sub(rettime, calltime);
 		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
@@ -467,6 +467,7 @@ static void dpm_resume_noirq(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	dpm_show_time(starttime, state, "noirq");
 	resume_device_irqs();
+	cpuidle_resume();
 }
 
 /**
@@ -867,6 +868,7 @@ static int dpm_suspend_noirq(pm_message_t state)
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	cpuidle_pause();
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
 	while (!list_empty(&dpm_late_early_list)) {
@@ -989,8 +991,16 @@ static int dpm_suspend_late(pm_message_t state)
 int dpm_suspend_end(pm_message_t state)
 {
 	int error = dpm_suspend_late(state);
+	if (error)
+		return error;
+
+	error = dpm_suspend_noirq(state);
+	if (error) {
+		dpm_resume_early(state);
+		return error;
+	}
 
-	return error ? : dpm_suspend_noirq(state);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_end);

diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index fd849a2c4fa8..74a67e0019a2 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -462,7 +462,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev)
 {
 	dev_pm_qos_remove_request(dev->power.pq_req);
-	dev->power.pq_req = 0;
+	dev->power.pq_req = NULL;
 }
 
 /**

diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 48be2ad4dd2c..b91dc6f1e914 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -474,6 +474,8 @@
 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
 
 #endif
 
+#ifdef CONFIG_PM_SLEEP
+
 static ssize_t async_show(struct device *dev, struct device_attribute *attr,
 			  char *buf)
 {
@@ -500,6 +502,8 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
 }
 
 static DEVICE_ATTR(async, 0644, async_show, async_store);
+
+#endif
 #endif /* CONFIG_PM_ADVANCED_DEBUG */
 
 static struct attribute *power_attrs[] = {

diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b986b8660b0c..80f9ab9c3aa4 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -95,6 +95,9 @@ struct regmap {
 
 	/* if set, converts bulk rw to single rw */
 	bool use_single_rw;
+
+	struct rb_root range_tree;
+	void *selector_work_buf;	/* Scratch buffer used for selector */
 };
 
 struct regcache_ops {
@@ -115,6 +118,20 @@ bool regmap_precious(struct regmap *map, unsigned int reg);
 
 int _regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
 
+struct regmap_range_node {
+	struct rb_node node;
+
+	unsigned int range_min;
+	unsigned int range_max;
+
+	unsigned int selector_reg;
+	unsigned int selector_mask;
+	int selector_shift;
+
+	unsigned int window_start;
+	unsigned int window_len;
+};
+
 #ifdef CONFIG_DEBUG_FS
 extern void regmap_debugfs_initcall(void);
 extern void regmap_debugfs_init(struct regmap *map, const char *name);

diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4fac4b9be88f..a89734621e51 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -24,14 +24,18 @@ struct regmap_irq_chip_data {
 	struct mutex lock;
 
 	struct regmap *map;
-	struct regmap_irq_chip *chip;
+	const struct regmap_irq_chip *chip;
 
 	int irq_base;
 	struct irq_domain *domain;
 
+	int irq;
+	int wake_count;
+
 	unsigned int *status_buf;
 	unsigned int *mask_buf;
 	unsigned int *mask_buf_def;
+	unsigned int *wake_buf;
 
 	unsigned int irq_reg_stride;
 };
@@ -71,6 +75,16 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 				d->chip->mask_base + (i * map->reg_stride));
 	}
 
+	/* If we've changed our wakeup count propagate it to the parent */
+	if (d->wake_count < 0)
+		for (i = d->wake_count; i < 0; i++)
+			irq_set_irq_wake(d->irq, 0);
+	else if (d->wake_count > 0)
+		for (i = 0; i < d->wake_count; i++)
+			irq_set_irq_wake(d->irq, 1);
+
+	d->wake_count = 0;
+
 	mutex_unlock(&d->lock);
 }
 
@@ -92,18 +106,41 @@ static void regmap_irq_disable(struct irq_data *data)
 	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
 }
 
+static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	struct regmap *map = d->map;
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+	if (!d->chip->wake_base)
+		return -EINVAL;
+
+	if (on) {
+		d->wake_buf[irq_data->reg_offset / map->reg_stride]
+			&= ~irq_data->mask;
+		d->wake_count++;
+	} else {
+		d->wake_buf[irq_data->reg_offset / map->reg_stride]
+			|= irq_data->mask;
+		d->wake_count--;
+	}
+
+	return 0;
+}
+
 static struct irq_chip regmap_irq_chip = {
 	.name			= "regmap",
 	.irq_bus_lock		= regmap_irq_lock,
 	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
 	.irq_disable		= regmap_irq_disable,
 	.irq_enable		= regmap_irq_enable,
+	.irq_set_wake		= regmap_irq_set_wake,
 };
 
 static irqreturn_t regmap_irq_thread(int irq, void *d)
 {
 	struct regmap_irq_chip_data *data = d;
-	struct regmap_irq_chip *chip = data->chip;
+	const struct regmap_irq_chip *chip = data->chip;
 	struct regmap *map = data->map;
 	int ret, i;
 	bool handled = false;
@@ -195,7 +232,7 @@ static struct irq_domain_ops regmap_domain_ops = {
  * register values used by the IRQ controller over suspend and resume.
  */
 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
-			int irq_base, struct regmap_irq_chip *chip,
+			int irq_base, const struct regmap_irq_chip *chip,
 			struct regmap_irq_chip_data **data)
 {
 	struct regmap_irq_chip_data *d;
@@ -240,6 +277,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	if (!d->mask_buf_def)
 		goto err_alloc;
 
+	if (chip->wake_base) {
+		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+				      GFP_KERNEL);
+		if (!d->wake_buf)
+			goto err_alloc;
+	}
+
+	d->irq = irq;
 	d->map = map;
 	d->chip = chip;
 	d->irq_base = irq_base;
@@ -294,6 +339,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 err_domain:
 	/* Should really dispose of the domain but... */
 err_alloc:
+	kfree(d->wake_buf);
 	kfree(d->mask_buf_def);
 	kfree(d->mask_buf);
 	kfree(d->status_buf);
@@ -315,6 +361,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
 	free_irq(irq, d);
 	/* We should unmap the domain but... */
+	kfree(d->wake_buf);
 	kfree(d->mask_buf_def);
 	kfree(d->mask_buf);
 	kfree(d->status_buf);
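A chip description using the new wake support might look like this sketch; the register addresses and chip layout are invented, and only a non-zero wake_base triggers the wake_buf allocation added above:

#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq foo_irqs[] = {
	[0] = { .reg_offset = 0, .mask = 0x01 },
	[1] = { .reg_offset = 0, .mask = 0x02 },
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name        = "foo",
	.status_base = 0x10,
	.mask_base   = 0x20,
	.wake_base   = 0x30,	/* enables regmap_irq_set_wake() */
	.num_regs    = 1,
	.irqs        = foo_irqs,
	.num_irqs    = ARRAY_SIZE(foo_irqs),
};

/* usage: regmap_add_irq_chip(map, irq, IRQF_ONESHOT, -1, &foo_irq_chip, &d); */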
@@ -346,6 +393,10 @@ EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
  */
 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
 {
+	/* Handle holes in the IRQ list */
+	if (!data->chip->irqs[irq].mask)
+		return -EINVAL;
+
 	return irq_create_mapping(data->domain, irq);
 }
 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);

diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index febd6de6c8ac..f05fc74dd84a 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -37,7 +37,7 @@ static int regmap_mmio_gather_write(void *context,
 
 	BUG_ON(reg_size != 4);
 
-	offset = be32_to_cpup(reg);
+	offset = *(u32 *)reg;
 
 	while (val_size) {
 		switch (ctx->val_bytes) {
@@ -45,14 +45,14 @@ static int regmap_mmio_gather_write(void *context,
 			writeb(*(u8 *)val, ctx->regs + offset);
 			break;
 		case 2:
-			writew(be16_to_cpup(val), ctx->regs + offset);
+			writew(*(u16 *)val, ctx->regs + offset);
 			break;
 		case 4:
-			writel(be32_to_cpup(val), ctx->regs + offset);
+			writel(*(u32 *)val, ctx->regs + offset);
 			break;
 #ifdef CONFIG_64BIT
 		case 8:
-			writeq(be64_to_cpup(val), ctx->regs + offset);
+			writeq(*(u64 *)val, ctx->regs + offset);
 			break;
 #endif
 		default:
@@ -83,7 +83,7 @@ static int regmap_mmio_read(void *context,
 
 	BUG_ON(reg_size != 4);
 
-	offset = be32_to_cpup(reg);
+	offset = *(u32 *)reg;
 
 	while (val_size) {
 		switch (ctx->val_bytes) {
@@ -91,14 +91,14 @@ static int regmap_mmio_read(void *context,
 			*(u8 *)val = readb(ctx->regs + offset);
 			break;
 		case 2:
-			*(u16 *)val = cpu_to_be16(readw(ctx->regs + offset));
+			*(u16 *)val = readw(ctx->regs + offset);
 			break;
 		case 4:
-			*(u32 *)val = cpu_to_be32(readl(ctx->regs + offset));
+			*(u32 *)val = readl(ctx->regs + offset);
 			break;
#ifdef CONFIG_64BIT
 		case 8:
-			*(u64 *)val = cpu_to_be32(readq(ctx->regs + offset));
+			*(u64 *)val = readq(ctx->regs + offset);
 			break;
 #endif
 		default:
@@ -124,9 +124,11 @@ static struct regmap_bus regmap_mmio = {
 	.gather_write = regmap_mmio_gather_write,
 	.read = regmap_mmio_read,
 	.free_context = regmap_mmio_free_context,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
 };
 
-struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
 					const struct regmap_config *config)
 {
 	struct regmap_mmio_context *ctx;
@@ -162,7 +164,15 @@ struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
 	if (config->reg_stride < min_stride)
 		return ERR_PTR(-EINVAL);
 
-	ctx = kzalloc(GFP_KERNEL, sizeof(*ctx));
+	switch (config->reg_format_endian) {
+	case REGMAP_ENDIAN_DEFAULT:
+	case REGMAP_ENDIAN_NATIVE:
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
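With the bus defaults above, an MMIO map no longer byte-swaps on its way to writel()/readl(). A minimal config sketch (base address and values are illustrative):

#include <linux/regmap.h>

static const struct regmap_config foo_mmio_config = {
	.reg_bits   = 32,
	.val_bits   = 32,
	.reg_stride = 4,
	/* reg/val_format_endian left at REGMAP_ENDIAN_DEFAULT, which now
	 * resolves to REGMAP_ENDIAN_NATIVE for the MMIO bus. */
};

/* usage: map = regmap_init_mmio(dev, base, &foo_mmio_config); */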
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c89aa01fb1de..c241ae2f2f10 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,12 +15,25 @@
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/err.h>
+#include <linux/rbtree.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/regmap.h>
 
 #include "internal.h"
 
+/*
+ * Sometimes for failures during very early init the trace
+ * infrastructure isn't available early enough to be used.  For this
+ * sort of problem defining LOG_DEVICE will add printks for basic
+ * register I/O on a specific device.
+ */
+#undef LOG_DEVICE
+
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+			       unsigned int mask, unsigned int val,
+			       bool *change);
+
 bool regmap_writeable(struct regmap *map, unsigned int reg)
 {
 	if (map->max_register && reg > map->max_register)
@@ -119,13 +132,19 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 	b[0] = val << shift;
 }
 
-static void regmap_format_16(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 {
 	__be16 *b = buf;
 
 	b[0] = cpu_to_be16(val << shift);
 }
 
+static void regmap_format_16_native(void *buf, unsigned int val,
+				    unsigned int shift)
+{
+	*(u16 *)buf = val << shift;
+}
+
 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 {
 	u8 *b = buf;
@@ -137,13 +156,19 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 	b[2] = val;
 }
 
-static void regmap_format_32(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 {
 	__be32 *b = buf;
 
 	b[0] = cpu_to_be32(val << shift);
 }
 
+static void regmap_format_32_native(void *buf, unsigned int val,
+				    unsigned int shift)
+{
+	*(u32 *)buf = val << shift;
+}
+
 static unsigned int regmap_parse_8(void *buf)
 {
 	u8 *b = buf;
@@ -151,7 +176,7 @@ static unsigned int regmap_parse_8(void *buf)
 	return b[0];
 }
 
-static unsigned int regmap_parse_16(void *buf)
+static unsigned int regmap_parse_16_be(void *buf)
 {
 	__be16 *b = buf;
 
@@ -160,6 +185,11 @@ static unsigned int regmap_parse_16(void *buf)
 	return b[0];
 }
 
+static unsigned int regmap_parse_16_native(void *buf)
+{
+	return *(u16 *)buf;
+}
+
 static unsigned int regmap_parse_24(void *buf)
 {
 	u8 *b = buf;
@@ -170,7 +200,7 @@ static unsigned int regmap_parse_24(void *buf)
 	return ret;
 }
 
-static unsigned int regmap_parse_32(void *buf)
+static unsigned int regmap_parse_32_be(void *buf)
 {
 	__be32 *b = buf;
 
@@ -179,6 +209,11 @@ static unsigned int regmap_parse_32(void *buf)
 	return b[0];
 }
 
+static unsigned int regmap_parse_32_native(void *buf)
+{
+	return *(u32 *)buf;
+}
+
 static void regmap_lock_mutex(struct regmap *map)
 {
 	mutex_lock(&map->mutex);
@@ -208,6 +243,67 @@ static void dev_get_regmap_release(struct device *dev, void *res)
 	 */
 }
 
+static bool _regmap_range_add(struct regmap *map,
+			      struct regmap_range_node *data)
+{
+	struct rb_root *root = &map->range_tree;
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	while (*new) {
+		struct regmap_range_node *this =
+			container_of(*new, struct regmap_range_node, node);
+
+		parent = *new;
+		if (data->range_max < this->range_min)
+			new = &((*new)->rb_left);
+		else if (data->range_min > this->range_max)
+			new = &((*new)->rb_right);
+		else
+			return false;
+	}
+
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+
+	return true;
+}
+
+static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
+						      unsigned int reg)
+{
+	struct rb_node *node = map->range_tree.rb_node;
+
+	while (node) {
+		struct regmap_range_node *this =
+			container_of(node, struct regmap_range_node, node);
+
+		if (reg < this->range_min)
+			node = node->rb_left;
+		else if (reg > this->range_max)
+			node = node->rb_right;
+		else
+			return this;
+	}
+
+	return NULL;
+}
+
+static void regmap_range_exit(struct regmap *map)
+{
+	struct rb_node *next;
+	struct regmap_range_node *range_node;
+
+	next = rb_first(&map->range_tree);
+	while (next) {
+		range_node = rb_entry(next, struct regmap_range_node, node);
+		next = rb_next(&range_node->node);
+		rb_erase(&range_node->node, &map->range_tree);
+		kfree(range_node);
+	}
+
+	kfree(map->selector_work_buf);
+}
+
 /**
  * regmap_init(): Initialise register map
  *
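A note on the LOG_DEVICE hook introduced above: to use it during early bring-up, a developer would replace the #undef with a device name as reported by dev_name(), matching the strcmp() in the read/write paths further down. The name here is purely illustrative:

#define LOG_DEVICE "spi0.1"	/* hypothetical; logs this device's register I/O */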
@@ -227,6 +323,8 @@ struct regmap *regmap_init(struct device *dev,
 {
 	struct regmap *map, **m;
 	int ret = -EINVAL;
+	enum regmap_endian reg_endian, val_endian;
+	int i, j;
 
 	if (!bus || !config)
 		goto err;
@@ -275,6 +373,18 @@ struct regmap *regmap_init(struct device *dev,
 		map->read_flag_mask = bus->read_flag_mask;
 	}
 
+	reg_endian = config->reg_format_endian;
+	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
+		reg_endian = bus->reg_format_endian_default;
+	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
+		reg_endian = REGMAP_ENDIAN_BIG;
+
+	val_endian = config->val_format_endian;
+	if (val_endian == REGMAP_ENDIAN_DEFAULT)
+		val_endian = bus->val_format_endian_default;
+	if (val_endian == REGMAP_ENDIAN_DEFAULT)
+		val_endian = REGMAP_ENDIAN_BIG;
+
 	switch (config->reg_bits + map->reg_shift) {
 	case 2:
 		switch (config->val_bits) {
@@ -321,11 +431,29 @@ struct regmap *regmap_init(struct device *dev,
 		break;
 
 	case 16:
-		map->format.format_reg = regmap_format_16;
+		switch (reg_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_reg = regmap_format_16_be;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_reg = regmap_format_16_native;
+			break;
+		default:
+			goto err_map;
+		}
 		break;
 
 	case 32:
-		map->format.format_reg = regmap_format_32;
+		switch (reg_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_reg = regmap_format_32_be;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_reg = regmap_format_32_native;
+			break;
+		default:
+			goto err_map;
+		}
 		break;
 
 	default:
@@ -338,21 +466,47 @@ struct regmap *regmap_init(struct device *dev,
 		map->format.parse_val = regmap_parse_8;
 		break;
 	case 16:
-		map->format.format_val = regmap_format_16;
-		map->format.parse_val = regmap_parse_16;
+		switch (val_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_val = regmap_format_16_be;
+			map->format.parse_val = regmap_parse_16_be;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_val = regmap_format_16_native;
+			map->format.parse_val = regmap_parse_16_native;
+			break;
+		default:
+			goto err_map;
+		}
 		break;
 	case 24:
+		if (val_endian != REGMAP_ENDIAN_BIG)
+			goto err_map;
 		map->format.format_val = regmap_format_24;
 		map->format.parse_val = regmap_parse_24;
 		break;
 	case 32:
-		map->format.format_val = regmap_format_32;
-		map->format.parse_val = regmap_parse_32;
+		switch (val_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_val = regmap_format_32_be;
+			map->format.parse_val = regmap_parse_32_be;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_val = regmap_format_32_native;
+			map->format.parse_val = regmap_parse_32_native;
+			break;
+		default:
+			goto err_map;
+		}
 		break;
 	}
 
-	if (map->format.format_write)
+	if (map->format.format_write) {
+		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
+		    (val_endian != REGMAP_ENDIAN_BIG))
+			goto err_map;
 		map->use_single_rw = true;
+	}
 
 	if (!map->format.format_write &&
 	    !(map->format.format_reg && map->format.format_val))
		goto err_map;
@@ -364,27 +518,88 @@ struct regmap *regmap_init(struct device *dev,
 		goto err_map;
 	}
 
-	regmap_debugfs_init(map, config->name);
+	map->range_tree = RB_ROOT;
+	for (i = 0; i < config->n_ranges; i++) {
+		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
+		struct regmap_range_node *new;
+
+		/* Sanity check */
+		if (range_cfg->range_max < range_cfg->range_min ||
+		    range_cfg->range_max > map->max_register ||
+		    range_cfg->selector_reg > map->max_register ||
+		    range_cfg->window_len == 0)
+			goto err_range;
+
+		/* Make sure, that this register range has no selector
+		   or data window within its boundary */
+		for (j = 0; j < config->n_ranges; j++) {
+			unsigned sel_reg = config->ranges[j].selector_reg;
+			unsigned win_min = config->ranges[j].window_start;
+			unsigned win_max = win_min +
+					   config->ranges[j].window_len - 1;
+
+			if (range_cfg->range_min <= sel_reg &&
+			    sel_reg <= range_cfg->range_max) {
+				goto err_range;
+			}
+
+			if (!(win_max < range_cfg->range_min ||
+			      win_min > range_cfg->range_max)) {
+				goto err_range;
+			}
+		}
+
+		new = kzalloc(sizeof(*new), GFP_KERNEL);
+		if (new == NULL) {
+			ret = -ENOMEM;
+			goto err_range;
+		}
+
+		new->range_min = range_cfg->range_min;
+		new->range_max = range_cfg->range_max;
+		new->selector_reg = range_cfg->selector_reg;
+		new->selector_mask = range_cfg->selector_mask;
+		new->selector_shift = range_cfg->selector_shift;
+		new->window_start = range_cfg->window_start;
+		new->window_len = range_cfg->window_len;
+
+		if (_regmap_range_add(map, new) == false) {
+			kfree(new);
+			goto err_range;
+		}
+
+		if (map->selector_work_buf == NULL) {
+			map->selector_work_buf =
+				kzalloc(map->format.buf_size, GFP_KERNEL);
+			if (map->selector_work_buf == NULL) {
+				ret = -ENOMEM;
+				goto err_range;
+			}
+		}
+	}
 
 	ret = regcache_init(map, config);
 	if (ret < 0)
-		goto err_debugfs;
+		goto err_range;
+
+	regmap_debugfs_init(map, config->name);
 
 	/* Add a devres resource for dev_get_regmap() */
 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
 	if (!m) {
 		ret = -ENOMEM;
-		goto err_cache;
+		goto err_debugfs;
 	}
 	*m = map;
 	devres_add(dev, m);
 
 	return map;
 
-err_cache:
-	regcache_exit(map);
 err_debugfs:
 	regmap_debugfs_exit(map);
+	regcache_exit(map);
+err_range:
+	regmap_range_exit(map);
 	kfree(map->work_buf);
 err_map:
 	kfree(map);
@@ -481,6 +696,7 @@ void regmap_exit(struct regmap *map)
 {
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
+	regmap_range_exit(map);
 	if (map->bus->free_context)
 		map->bus->free_context(map->bus_context);
 	kfree(map->work_buf);
@@ -526,6 +742,57 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
 }
 EXPORT_SYMBOL_GPL(dev_get_regmap);
 
+static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+			       unsigned int val_num)
+{
+	struct regmap_range_node *range;
+	void *orig_work_buf;
+	unsigned int win_offset;
+	unsigned int win_page;
+	bool page_chg;
+	int ret;
+
+	range = _regmap_range_lookup(map, *reg);
+	if (range) {
+		win_offset = (*reg - range->range_min) % range->window_len;
+		win_page = (*reg - range->range_min) / range->window_len;
+
+		if (val_num > 1) {
+			/* Bulk write shouldn't cross range boundary */
+			if (*reg + val_num - 1 > range->range_max)
+				return -EINVAL;
+
+			/* ... or single page boundary */
+			if (val_num > range->window_len - win_offset)
+				return -EINVAL;
+		}
+
+		/* It is possible to have selector register inside data window.
+		   In that case, selector register is located on every page and
+		   it needs no page switching, when accessed alone. */
+		if (val_num > 1 ||
+		    range->window_start + win_offset != range->selector_reg) {
+			/* Use separate work_buf during page switching */
+			orig_work_buf = map->work_buf;
+			map->work_buf = map->selector_work_buf;
+
+			ret = _regmap_update_bits(map, range->selector_reg,
+						  range->selector_mask,
+						  win_page << range->selector_shift,
+						  &page_chg);
+
+			map->work_buf = orig_work_buf;
+
+			if (ret < 0)
+				return ret;
+		}
+
+		*reg = range->window_start + win_offset;
+	}
+
+	return 0;
+}
+
 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			     const void *val, size_t val_len)
 {
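A paged register map matching the lookup logic above might be described like this sketch; every address is invented, and the regmap_range_cfg fields mirror the regmap_range_node members added to internal.h. Note the geometry passes the sanity checks: 0x100 virtual registers split into four 0x40-register pages selected by a two-bit field:

#include <linux/regmap.h>

static const struct regmap_range_cfg foo_ranges[] = {
	{
		.range_min      = 0x100,	/* paged (virtual) registers */
		.range_max      = 0x1ff,
		.selector_reg   = 0x00,		/* page-select register */
		.selector_mask  = 0x03,
		.selector_shift = 0,
		.window_start   = 0x40,		/* physical data window */
		.window_len     = 0x40,
	},
};

static const struct regmap_config foo_config = {
	.reg_bits     = 8,
	.val_bits     = 8,
	.max_register = 0x1ff,
	.ranges       = foo_ranges,
	.n_ranges     = ARRAY_SIZE(foo_ranges),
};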
@@ -563,6 +830,10 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		}
 	}
 
+	ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+	if (ret < 0)
+		return ret;
+
 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
 
 	u8[0] |= map->write_flag_mask;
@@ -623,9 +894,18 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 		}
 	}
 
+#ifdef LOG_DEVICE
+	if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+		dev_info(map->dev, "%x <= %x\n", reg, val);
+#endif
+
 	trace_regmap_reg_write(map->dev, reg, val);
 
 	if (map->format.format_write) {
+		ret = _regmap_select_page(map, &reg, 1);
+		if (ret < 0)
+			return ret;
+
 		map->format.format_write(map, reg, val);
 
 		trace_regmap_hw_write_start(map->dev, reg, 1);
@@ -783,6 +1063,10 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	u8 *u8 = map->work_buf;
 	int ret;
 
+	ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+	if (ret < 0)
+		return ret;
+
 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
 
 	/*
@@ -826,6 +1110,12 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
 	if (ret == 0) {
 		*val = map->format.parse_val(map->work_buf);
+
+#ifdef LOG_DEVICE
+		if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+			dev_info(map->dev, "%x => %x\n", reg, *val);
+#endif
+
 		trace_regmap_reg_read(map->dev, reg, *val);
 	}
 
@@ -982,11 +1272,9 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 	int ret;
 	unsigned int tmp, orig;
 
-	map->lock(map);
-
 	ret = _regmap_read(map, reg, &orig);
 	if (ret != 0)
-		goto out;
+		return ret;
 
 	tmp = orig & ~mask;
 	tmp |= val & mask;
@@ -998,9 +1286,6 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 		*change = false;
 	}
 
-out:
-	map->unlock(map);
-
 	return ret;
 }
 
@@ -1018,7 +1303,13 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
 		       unsigned int mask, unsigned int val)
 {
 	bool change;
-	return _regmap_update_bits(map, reg, mask, val, &change);
+	int ret;
+
+	map->lock(map);
+	ret = _regmap_update_bits(map, reg, mask, val, &change);
+	map->unlock(map);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
@@ -1038,7 +1329,12 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 			     unsigned int mask, unsigned int val,
 			     bool *change)
 {
-	return _regmap_update_bits(map, reg, mask, val, change);
+	int ret;
+
+	map->lock(map);
+	ret = _regmap_update_bits(map, reg, mask, val, change);
+	map->unlock(map);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
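With locking moved out to the exported wrappers above (so the page-selector path can reuse the unlocked helper), a caller's read-modify-write stays atomic with respect to other users of the same map. A trivial usage sketch with illustrative register and bit values:

#include <linux/regmap.h>

static int foo_enable(struct regmap *map)
{
	/* Set bit 0 of register 0x04 without disturbing the other bits;
	 * the map lock is taken and released inside the call. */
	return regmap_update_bits(map, 0x04, 0x01, 0x01);
}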