From b02f6695f7601c4f8442b9cf4636802e7fa8d550 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Feb 2014 00:35:23 +0100 Subject: PM / QoS: Rename device resume latency QoS items Rename symbols, variables, functions and structure fields related do the resume latency device PM QoS type so that it is clear where they belong (in particular, to avoid confusion with the latency tolerance device PM QoS type introduced by a subsequent changeset). Update the PM QoS documentation to better reflect its current state. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/power.h | 4 ++-- drivers/base/power/qos.c | 55 +++++++++++++++++++++++----------------------- drivers/base/power/sysfs.c | 32 ++++++++++++++------------- 3 files changed, 46 insertions(+), 45 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index cfc3226ec492..a21223d95926 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev); extern void rpm_sysfs_remove(struct device *dev); extern int wakeup_sysfs_add(struct device *dev); extern void wakeup_sysfs_remove(struct device *dev); -extern int pm_qos_sysfs_add_latency(struct device *dev); -extern void pm_qos_sysfs_remove_latency(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5c1361a9e5dd..67c0f4219b02 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags); s32 __dev_pm_qos_read_value(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? - 0 : pm_qos_read_value(&dev->power.qos->latency); + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); } /** @@ -141,11 +141,11 @@ static int apply_constraint(struct dev_pm_qos_request *req, int ret; switch(req->type) { - case DEV_PM_QOS_LATENCY: - ret = pm_qos_update_target(&qos->latency, &req->data.pnode, - action, value); + case DEV_PM_QOS_RESUME_LATENCY: + ret = pm_qos_update_target(&qos->resume_latency, + &req->data.pnode, action, value); if (ret) { - value = pm_qos_read_value(&qos->latency); + value = pm_qos_read_value(&qos->resume_latency); blocking_notifier_call_chain(&dev_pm_notifiers, (unsigned long)value, req); @@ -186,10 +186,10 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) } BLOCKING_INIT_NOTIFIER_HEAD(n); - c = &qos->latency; + c = &qos->resume_latency; plist_head_init(&c->list); - c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; - c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = n; @@ -224,7 +224,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. */ - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); pm_qos_sysfs_remove_flags(dev); mutex_lock(&dev_pm_qos_mtx); @@ -237,7 +237,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) goto out; /* Flush the constraints lists for the device. 
*/ - c = &qos->latency; + c = &qos->resume_latency; plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { /* * Update constraints list and call the notification @@ -341,7 +341,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, return -ENODEV; switch(req->type) { - case DEV_PM_QOS_LATENCY: + case DEV_PM_QOS_RESUME_LATENCY: curr_value = req->data.pnode.prio; break; case DEV_PM_QOS_FLAGS: @@ -460,8 +460,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) ret = dev_pm_qos_constraints_allocate(dev); if (!ret) - ret = blocking_notifier_chain_register( - dev->power.qos->latency.notifiers, notifier); + ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return ret; @@ -487,9 +487,8 @@ int dev_pm_qos_remove_notifier(struct device *dev, /* Silently return if the constraints object is not present. */ if (!IS_ERR_OR_NULL(dev->power.qos)) - retval = blocking_notifier_chain_unregister( - dev->power.qos->latency.notifiers, - notifier); + retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return retval; @@ -543,7 +542,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, if (ancestor) ret = dev_pm_qos_add_request(ancestor, req, - DEV_PM_QOS_LATENCY, value); + DEV_PM_QOS_RESUME_LATENCY, value); if (ret < 0) req->dev = NULL; @@ -559,9 +558,9 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, struct dev_pm_qos_request *req = NULL; switch(type) { - case DEV_PM_QOS_LATENCY: - req = dev->power.qos->latency_req; - dev->power.qos->latency_req = NULL; + case DEV_PM_QOS_RESUME_LATENCY: + req = dev->power.qos->resume_latency_req; + dev->power.qos->resume_latency_req = NULL; break; case DEV_PM_QOS_FLAGS: req = dev->power.qos->flags_req; @@ -597,7 +596,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (!req) return -ENOMEM; - ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); if (ret < 0) { kfree(req); return ret; @@ -609,7 +608,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (IS_ERR_OR_NULL(dev->power.qos)) ret = -ENODEV; - else if (dev->power.qos->latency_req) + else if (dev->power.qos->resume_latency_req) ret = -EEXIST; if (ret < 0) { @@ -618,13 +617,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->latency_req = req; + dev->power.qos->resume_latency_req = req; mutex_unlock(&dev_pm_qos_mtx); - ret = pm_qos_sysfs_add_latency(dev); + ret = pm_qos_sysfs_add_resume_latency(dev); if (ret) - dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); out: mutex_unlock(&dev_pm_qos_sysfs_mtx); @@ -634,8 +633,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); static void __dev_pm_qos_hide_latency_limit(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); } /** @@ -646,7 +645,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev) { mutex_lock(&dev_pm_qos_sysfs_mtx); - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); 
mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_latency_limit(dev); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 03e089ade5ce..4e24955aac8a 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, autosuspend_delay_ms_store); -static ssize_t pm_qos_latency_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t pm_qos_resume_latency_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); + return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev)); } -static ssize_t pm_qos_latency_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t n) +static ssize_t pm_qos_resume_latency_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) { s32 value; int ret; @@ -237,12 +238,13 @@ static ssize_t pm_qos_latency_store(struct device *dev, if (value < 0) return -EINVAL; - ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); + ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, + value); return ret < 0 ? ret : n; } static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, - pm_qos_latency_show, pm_qos_latency_store); + pm_qos_resume_latency_show, pm_qos_resume_latency_store); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, @@ -618,15 +620,15 @@ static struct attribute_group pm_runtime_attr_group = { .attrs = runtime_attrs, }; -static struct attribute *pm_qos_latency_attrs[] = { +static struct attribute *pm_qos_resume_latency_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_pm_qos_resume_latency_us.attr, #endif /* CONFIG_PM_RUNTIME */ NULL, }; -static struct attribute_group pm_qos_latency_attr_group = { +static struct attribute_group pm_qos_resume_latency_attr_group = { .name = power_group_name, - .attrs = pm_qos_latency_attrs, + .attrs = pm_qos_resume_latency_attrs, }; static struct attribute *pm_qos_flags_attrs[] = { @@ -681,14 +683,14 @@ void wakeup_sysfs_remove(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); } -int pm_qos_sysfs_add_latency(struct device *dev) +int pm_qos_sysfs_add_resume_latency(struct device *dev) { - return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); + return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } -void pm_qos_sysfs_remove_latency(struct device *dev) +void pm_qos_sysfs_remove_resume_latency(struct device *dev) { - sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); + sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } int pm_qos_sysfs_add_flags(struct device *dev) -- cgit v1.2.1 From 327adaedf2218b0e318eb393aa79cf2be64c199f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Feb 2014 00:35:29 +0100 Subject: PM / QoS: Add no_constraints_value field to struct pm_qos_constraints Add a new field, no_constraints_value, to struct pm_qos_constraints representing a list of PM QoS constraint requests to be returned by pm_qos_get_value() when that list of requests is empty. That field will be equal to default_value for all of the existing global PM QoS classes and for the resume latency device PM QoS type, but it will be different from default_value for the new latency tolerance device PM QoS type introduced by the next changeset. 
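Based on the changelog above (and only as an illustration, not part of the patch itself), this is roughly how pm_qos_get_value() is expected to consult the new field: an empty request list yields no_constraint_value instead of the aggregated list extreme. The body below is a simplified sketch of the kernel's aggregation logic, not a verbatim copy:

#include <linux/plist.h>
#include <linux/pm_qos.h>

/* Sketch: return the effective constraint for a constraints object. */
static s32 example_pm_qos_get_value(struct pm_qos_constraints *c)
{
	if (plist_head_empty(&c->list))
		return c->no_constraint_value;	/* new field: value used when no requests exist */

	switch (c->type) {
	case PM_QOS_MIN:
		return plist_first(&c->list)->prio;	/* smallest request wins */
	case PM_QOS_MAX:
		return plist_last(&c->list)->prio;	/* largest request wins */
	default:
		return c->default_value;		/* simplified fallback for the sketch */
	}
}

For resume latency the new field equals default_value, so behavior does not change; the distinction only matters for constraint types whose "no requests at all" semantics differ from their default, such as the latency tolerance type added by the next changeset.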
Signed-off-by: Rafael J. Wysocki --- drivers/base/power/qos.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/base') diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 67c0f4219b02..c754e55f9dcb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -190,6 +190,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) plist_head_init(&c->list); c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = n; -- cgit v1.2.1 From 2d984ad132a87ca2112f81f21039493176a8bca0 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Feb 2014 00:35:38 +0100 Subject: PM / QoS: Introduce latency tolerance device PM QoS type Add a new latency tolerance device PM QoS type to be used for specifying active state (RPM_ACTIVE) memory access (DMA) latency tolerance requirements for devices. It may be used to prevent hardware from choosing overly aggressive energy-saving operation modes (causing too much latency to appear) for the whole platform. This feature requires hardware support, so it will only be available for devices having a new .set_latency_tolerance() callback in struct dev_pm_info populated, in which case the routine pointed to by it should implement whatever is necessary to transfer the effective requirement value to the hardware. Whenever the effective latency tolerance changes for the device, its .set_latency_tolerance() callback will be executed and the effective value will be passed to it. If that value is negative, which means that the list of latency tolerance requirements for the device is empty, the callback is expected to switch the underlying hardware latency tolerance control mechanism to an autonomous mode if available. If that value is PM_QOS_LATENCY_ANY, in turn, and the hardware supports a special "no requirement" setting, the callback is expected to use it. That allows software to prevent the hardware from automatically updating the device's latency tolerance in response to its power state changes (e.g. during transitions from D3cold to D0), which generally may be done in the autonomous latency tolerance control mode. If .set_latency_tolerance() is present for the device, a new pm_qos_latency_tolerance_us attribute will be present in the device's power directory in sysfs. Then, user space can use that attribute to specify its latency tolerance requirement for the device, if any. Writing "any" to it means "no requirement, but do not let the hardware control latency tolerance" and writing "auto" to it allows the hardware to be switched to the autonomous mode if there are no other requirements from the kernel side in the device's list. This changeset includes a fix from Mika Westerberg. Signed-off-by: Rafael J.
Wysocki --- drivers/base/power/qos.c | 144 ++++++++++++++++++++++++++++++++++++++------- drivers/base/power/sysfs.c | 65 +++++++++++++++++--- 2 files changed, 180 insertions(+), 29 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index c754e55f9dcb..84756f7f09d9 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -151,6 +151,14 @@ static int apply_constraint(struct dev_pm_qos_request *req, req); } break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + ret = pm_qos_update_target(&qos->latency_tolerance, + &req->data.pnode, action, value); + if (ret) { + value = pm_qos_read_value(&qos->latency_tolerance); + req->dev->power.set_latency_tolerance(req->dev, value); + } + break; case DEV_PM_QOS_FLAGS: ret = pm_qos_update_flags(&qos->flags, &req->data.flr, action, value); @@ -194,6 +202,13 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) c->type = PM_QOS_MIN; c->notifiers = n; + c = &qos->latency_tolerance; + plist_head_init(&c->list); + c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + c->type = PM_QOS_MIN; + INIT_LIST_HEAD(&qos->flags.list); spin_lock_irq(&dev->power.lock); @@ -247,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); } + c = &qos->latency_tolerance; + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } f = &qos->flags; list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); @@ -266,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } +static bool dev_pm_qos_invalid_request(struct device *dev, + struct dev_pm_qos_request *req) +{ + return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE + && !dev->power.set_latency_tolerance); +} + +static int __dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + int ret = 0; + + if (!dev || dev_pm_qos_invalid_request(dev, req)) + return -EINVAL; + + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) + return -EINVAL; + + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); + + trace_dev_pm_qos_add_request(dev_name(dev), type, value); + if (!ret) { + req->dev = dev; + req->type = type; + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + } + return ret; +} + /** * dev_pm_qos_add_request - inserts new qos request into the list * @dev: target device for the constraint @@ -291,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { - int ret = 0; - - if (!dev || !req) /*guard against callers passing in null */ - return -EINVAL; - - if (WARN(dev_pm_qos_request_active(req), - "%s() called for already added request\n", __func__)) - return -EINVAL; + int ret; mutex_lock(&dev_pm_qos_mtx); - - if (IS_ERR(dev->power.qos)) - ret = -ENODEV; - else if (!dev->power.qos) - ret = dev_pm_qos_constraints_allocate(dev); - - trace_dev_pm_qos_add_request(dev_name(dev), type, value); - if 
(!ret) { - req->dev = dev; - req->type = type; - ret = apply_constraint(req, PM_QOS_ADD_REQ, value); - } - + ret = __dev_pm_qos_add_request(dev, req, type, value); mutex_unlock(&dev_pm_qos_mtx); - return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); @@ -343,6 +377,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, switch(req->type) { case DEV_PM_QOS_RESUME_LATENCY: + case DEV_PM_QOS_LATENCY_TOLERANCE: curr_value = req->data.pnode.prio; break; case DEV_PM_QOS_FLAGS: @@ -563,6 +598,10 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, req = dev->power.qos->resume_latency_req; dev->power.qos->resume_latency_req = NULL; break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + req = dev->power.qos->latency_tolerance_req; + dev->power.qos->latency_tolerance_req = NULL; + break; case DEV_PM_QOS_FLAGS: req = dev->power.qos->flags_req; dev->power.qos->flags_req = NULL; @@ -768,6 +807,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) pm_runtime_put(dev); return ret; } + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ + s32 ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req ? + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : + dev->power.qos->latency_tolerance_req->data.pnode.prio; + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). + */ +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req) { + struct dev_pm_qos_request *req; + + if (val < 0) { + ret = -EINVAL; + goto out; + } + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto out; + } + ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); + if (ret < 0) { + kfree(req); + goto out; + } + dev->power.qos->latency_tolerance_req = req; + } else { + if (val < 0) { + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); + ret = 0; + } else { + ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); + } + } + + out: + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} #else /* !CONFIG_PM_RUNTIME */ static void __dev_pm_qos_hide_latency_limit(struct device *dev) {} static void __dev_pm_qos_hide_flags(struct device *dev) {} diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 4e24955aac8a..95b181d1ca6d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -246,6 +246,40 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev, static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, pm_qos_resume_latency_show, pm_qos_resume_latency_store); +static ssize_t pm_qos_latency_tolerance_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + + if (value < 0) + return sprintf(buf, "auto\n"); + else if (value == PM_QOS_LATENCY_ANY) + return sprintf(buf, "any\n"); + + return sprintf(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_store(struct device *dev, + struct device_attribute 
*attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (kstrtos32(buf, 0, &value)) { + if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) + value = PM_QOS_LATENCY_ANY; + } + ret = dev_pm_qos_update_user_latency_tolerance(dev, value); + return ret < 0 ? ret : n; +} + +static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, + pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store); + static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -631,6 +665,17 @@ static struct attribute_group pm_qos_resume_latency_attr_group = { .attrs = pm_qos_resume_latency_attrs, }; +static struct attribute *pm_qos_latency_tolerance_attrs[] = { +#ifdef CONFIG_PM_RUNTIME + &dev_attr_pm_qos_latency_tolerance_us.attr, +#endif /* CONFIG_PM_RUNTIME */ + NULL, +}; +static struct attribute_group pm_qos_latency_tolerance_attr_group = { + .name = power_group_name, + .attrs = pm_qos_latency_tolerance_attrs, +}; + static struct attribute *pm_qos_flags_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_pm_qos_no_power_off.attr, @@ -656,18 +701,23 @@ int dpm_sysfs_add(struct device *dev) if (rc) goto err_out; } - if (device_can_wakeup(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); - if (rc) { - if (pm_runtime_callbacks_present(dev)) - sysfs_unmerge_group(&dev->kobj, - &pm_runtime_attr_group); - goto err_out; - } + if (rc) + goto err_runtime; + } + if (dev->power.set_latency_tolerance) { + rc = sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); + if (rc) + goto err_wakeup; } return 0; + err_wakeup: + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); err_out: sysfs_remove_group(&dev->kobj, &pm_attr_group); return rc; @@ -710,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); -- cgit v1.2.1 From 71d821fdaec08afcbfb3cf258c0d64ea0e336ff3 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Feb 2014 00:36:00 +0100 Subject: PM / QoS: Add type to dev_pm_qos_add_ancestor_request() arguments Rework dev_pm_qos_add_ancestor_request() so that device PM QoS type is passed to it as the third argument and make it support the DEV_PM_QOS_LATENCY_TOLERANCE device PM QoS type (in addition to DEV_PM_QOS_RESUME_LATENCY). That will allow the drivers of devices without latency tolerance hardware support to use their ancestors having it as proxies for their latency tolerance requirements. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/qos.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 84756f7f09d9..36b9eb4862cb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -565,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. * @req: Pointer to the preallocated handle. + * @type: Type of the request. * @value: Constraint latency value. 
*/ int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) { struct device *ancestor = dev->parent; int ret = -ENODEV; - while (ancestor && !ancestor->power.ignore_children) - ancestor = ancestor->parent; + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + while (ancestor && !ancestor->power.set_latency_tolerance) + ancestor = ancestor->parent; + + break; + default: + ancestor = NULL; + } if (ancestor) - ret = dev_pm_qos_add_request(ancestor, req, - DEV_PM_QOS_RESUME_LATENCY, value); + ret = dev_pm_qos_add_request(ancestor, req, type, value); if (ret < 0) req->dev = NULL; -- cgit v1.2.1 From 7d1af287320b41909c070f68ffe1591060a32769 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 27 Feb 2014 19:26:44 +0100 Subject: PM / domains: Turn latency warning into debug message If devices don't provide latency data, this warning can be quite noisy until the pm domain was enabled and disabled a few times. Turn this warning into a debug message. Signed-off-by: Philipp Zabel Reviewed-by: Ulf Hansson Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/base') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index bfb8955c406c..dc127e5dec4b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -42,7 +42,7 @@ struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ if (!__retval && __elapsed > __td->field) { \ __td->field = __elapsed; \ - dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ __elapsed); \ genpd->max_off_time_changed = true; \ __td->constraint_changed = true; \ -- cgit v1.2.1 From 5f59df79837bb809f3945613aba5519cd9755a53 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Sat, 1 Mar 2014 11:56:04 +0100 Subject: PM / runtime: Fetch runtime PM callbacks using a macro While fetching the proper runtime PM callback, we walk the hierarchy of device's power domains, subsystems and drivers. This is common for rpm_suspend(), rpm_idle() and rpm_resume(). Let's clean up the code by using a macro that handles this. Signed-off-by: Ulf Hansson Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/runtime.c | 78 ++++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 39 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 72e00e66ecc5..ac495b1357fa 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -13,6 +13,42 @@ #include #include "power.h" +#define RPM_GET_CALLBACK(dev, cb) \ +({ \ + int (*__rpm_cb)(struct device *__d); \ + \ + if (dev->pm_domain) \ + __rpm_cb = dev->pm_domain->ops.cb; \ + else if (dev->type && dev->type->pm) \ + __rpm_cb = dev->type->pm->cb; \ + else if (dev->class && dev->class->pm) \ + __rpm_cb = dev->class->pm->cb; \ + else if (dev->bus && dev->bus->pm) \ + __rpm_cb = dev->bus->pm->cb; \ + else \ + __rpm_cb = NULL; \ + \ + if (!__rpm_cb && dev->driver && dev->driver->pm) \ + __rpm_cb = dev->driver->pm->cb; \ + \ + __rpm_cb; \ +}) + +static int (*rpm_get_suspend_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_suspend); +} + +static int (*rpm_get_resume_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_resume); +} + +static int (*rpm_get_idle_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_idle); +} + static int rpm_resume(struct device *dev, int rpmflags); static int rpm_suspend(struct device *dev, int rpmflags); @@ -310,19 +346,7 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_idle; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_idle; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_idle; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_idle; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_idle; + callback = rpm_get_idle_cb(dev); if (callback) retval = __rpm_callback(callback, dev); @@ -492,19 +516,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_SUSPENDING); - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_suspend; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_suspend; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_suspend; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_suspend; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_suspend; + callback = rpm_get_suspend_cb(dev); retval = rpm_callback(callback, dev); if (retval) @@ -724,19 +736,7 @@ static int rpm_resume(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_RESUMING); - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_resume; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_resume; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_resume; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_resume; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_resume; + callback = rpm_get_resume_cb(dev); retval = rpm_callback(callback, dev); if (retval) { -- cgit v1.2.1 From 37f204164dfb0186a0caf20bc3e3120080bcd788 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Sat, 1 Mar 2014 11:56:05 +0100 Subject: PM: Add 
pm_runtime_suspend|resume_force functions This patch provides two new runtime PM helper functions which are intended to be used from system suspend/resume callbacks, to make sure devices are put into low power state during system suspend and brought back to full power at system resume. The prerequisite is to have all levels of a device's runtime PM callbacks defined through the SET_PM_RUNTIME_PM_OPS macro, which means these are available for CONFIG_PM. By using the new runtime PM helper functions, the two scenarios below in particular will be addressed. 1) The PM core prevents .runtime_suspend callbacks from being invoked during system suspend. That means even for a runtime PM centric subsystem and driver, the device needs to be put into low power state from a system suspend callback. Otherwise it may very well be left in full power state (runtime resumed) while the system is suspended. By using the new helper functions, we make sure to walk the hierarchy of a device's power domain, subsystem and driver. 2) Subsystems and drivers need to cope with all the combinations of CONFIG_PM_SLEEP and CONFIG_PM_RUNTIME. The two new helper functions smoothly address this. Signed-off-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- drivers/base/power/Makefile | 3 +- drivers/base/power/runtime.c | 84 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 2 deletions(-) (limited to 'drivers/base') diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2e58ebb1f6c0..1cb8544598d5 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,6 +1,5 @@ -obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o -obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index ac495b1357fa..4776cf528d08 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -44,6 +44,7 @@ static int (*rpm_get_resume_cb(struct device *dev))(struct device *) return RPM_GET_CALLBACK(dev, runtime_resume); } +#ifdef CONFIG_PM_RUNTIME static int (*rpm_get_idle_cb(struct device *dev))(struct device *) { return RPM_GET_CALLBACK(dev, runtime_idle); @@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev) if (dev->power.irq_safe && dev->parent) pm_runtime_put(dev->parent); } +#endif + +/** + * pm_runtime_force_suspend - Force a device into suspend state if needed. + * @dev: Device to suspend. + * + * Disable runtime PM so we safely can check the device's runtime PM status and + * if it is active, invoke it's .runtime_suspend callback to bring it into + * suspend state. Keep runtime PM disabled to preserve the state unless we + * encounter errors. + * + * Typically this function may be invoked from a system suspend callback to make + * sure the device is put into low power state. + */ +int pm_runtime_force_suspend(struct device *dev) +{ + int (*callback)(struct device *); + int ret = 0; + + pm_runtime_disable(dev); + + /* + * Note that pm_runtime_status_suspended() returns false while + * !CONFIG_PM_RUNTIME, which means the device will be put into low + * power state.
+ */ + if (pm_runtime_status_suspended(dev)) + return 0; + + callback = rpm_get_suspend_cb(dev); + + if (!callback) { + ret = -ENOSYS; + goto err; + } + + ret = callback(dev); + if (ret) + goto err; + + pm_runtime_set_suspended(dev); + return 0; +err: + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); + +/** + * pm_runtime_force_resume - Force a device into resume state. + * @dev: Device to resume. + * + * Prior invoking this function we expect the user to have brought the device + * into low power state by a call to pm_runtime_force_suspend(). Here we reverse + * those actions and brings the device into full power. We update the runtime PM + * status and re-enables runtime PM. + * + * Typically this function may be invoked from a system resume callback to make + * sure the device is put into full power state. + */ +int pm_runtime_force_resume(struct device *dev) +{ + int (*callback)(struct device *); + int ret = 0; + + callback = rpm_get_resume_cb(dev); + + if (!callback) { + ret = -ENOSYS; + goto out; + } + + ret = callback(dev); + if (ret) + goto out; + + pm_runtime_set_active(dev); + pm_runtime_mark_last_busy(dev); +out: + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_resume); -- cgit v1.2.1
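To tie the "Introduce latency tolerance device PM QoS type" changelog above to driver code, here is a hedged sketch of a .set_latency_tolerance() implementation. The foo_* helpers and the FOO_LTR_NO_REQUIREMENT encoding are invented for illustration; only the callback signature, the dev->power.set_latency_tolerance field, and the negative / PM_QOS_LATENCY_ANY conventions come from the patches themselves:

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical hardware accessors -- not a real kernel API. */
static void foo_enable_auto_ltr(struct device *dev)
{
	/* Set the device's autonomous-LTR enable bit (hardware-specific). */
}

static void foo_write_ltr(struct device *dev, u32 usecs)
{
	/* Program an explicit LTR value, in microseconds (hardware-specific). */
}

#define FOO_LTR_NO_REQUIREMENT	0xffffffff	/* made-up "no requirement" encoding */

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	if (val < 0) {
		/* Request list is empty: let the hardware manage LTR on its own. */
		foo_enable_auto_ltr(dev);
	} else if (val == PM_QOS_LATENCY_ANY) {
		/* "No requirement", but keep LTR under software control. */
		foo_write_ltr(dev, FOO_LTR_NO_REQUIREMENT);
	} else {
		/* Effective constraint, in microseconds. */
		foo_write_ltr(dev, val);
	}
}

The field would be populated (dev->power.set_latency_tolerance = foo_set_latency_tolerance;) before the device is registered, so that dpm_sysfs_add() sees it and exposes power/pm_qos_latency_tolerance_us for that device.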
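Similarly, a usage sketch for the pm_runtime_force_suspend()/pm_runtime_force_resume() helpers added by the last changeset: a runtime-PM-centric driver can point its system sleep callbacks straight at them, so the system suspend path reuses the runtime PM callbacks. The foo_* functions are placeholders, and the wiring assumes the standard SET_SYSTEM_SLEEP_PM_OPS/SET_RUNTIME_PM_OPS macros from linux/pm.h:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Put the hardware into its low-power state here. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the hardware back to full power here. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* System sleep reuses the runtime PM path via the new helpers. */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

Note that pm_runtime_force_suspend() returns 0 early when the device is already runtime-suspended, so an already-idle device is not touched again on the way down.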