Diffstat (limited to 'kernel/power')
 kernel/power/Kconfig        |   5
 kernel/power/autosleep.c    |   2
 kernel/power/hibernate.c    |  26
 kernel/power/main.c         | 133
 kernel/power/power.h        |   2
 kernel/power/qos.c          | 296
 kernel/power/snapshot.c     |  57
 kernel/power/suspend.c      |  74
 kernel/power/suspend_test.c |   6
 kernel/power/wakelock.c     |  32
 10 files changed, 486 insertions(+), 147 deletions(-)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index d3667b4075c1..7cbfbeacd68a 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -27,7 +27,10 @@ config SUSPEND_SKIP_SYNC
 	  Skip the kernel sys_sync() before freezing user processes.
 	  Some systems prefer not to pay this cost on every invocation
 	  of suspend, or they are content with invoking sync() from
-	  user-space before invoking suspend. Say Y if that's your case.
+	  user-space before invoking suspend. There's a run-time switch
+	  at '/sys/power/sync_on_suspend' to configure this behaviour.
+	  This setting changes the default for the run-time switch. Say Y
+	  to change the default to disable the kernel sys_sync().
 
 config HIBERNATE_CALLBACKS
 	bool
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index 41e83a779e19..9af5a50d3489 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -116,7 +116,7 @@ int pm_autosleep_set_state(suspend_state_t state)
 
 int __init pm_autosleep_init(void)
 {
-	autosleep_ws = wakeup_source_register("autosleep");
+	autosleep_ws = wakeup_source_register(NULL, "autosleep");
 	if (!autosleep_ws)
 		return -ENOMEM;
 
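The autosleep change above reflects wakeup_source_register() gaining a struct device argument; callers without an associated device, like autosleep, pass NULL. A minimal sketch of the updated calling convention (the my_drv names are illustrative, not part of this patch):

	#include <linux/pm_wakeup.h>

	static struct wakeup_source *my_drv_ws;

	static int my_drv_wakeup_init(struct device *dev)
	{
		/* Associate the wakeup source with @dev; pass NULL when there is none. */
		my_drv_ws = wakeup_source_register(dev, "my_drv");
		if (!my_drv_ws)
			return -ENOMEM;	/* registration failed, as in pm_autosleep_init() */
		return 0;
	}
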
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index cd7434e6000d..6dbeedb7354c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -9,7 +9,7 @@
  * Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
  */
 
-#define pr_fmt(fmt) "PM: " fmt
+#define pr_fmt(fmt) "PM: hibernation: " fmt
 
 #include <linux/export.h>
 #include <linux/suspend.h>
@@ -30,6 +30,7 @@
 #include <linux/ctype.h>
 #include <linux/genhd.h>
 #include <linux/ktime.h>
+#include <linux/security.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -68,7 +69,7 @@ static const struct platform_hibernation_ops *hibernation_ops;
 
 bool hibernation_available(void)
 {
-	return (nohibernate == 0);
+	return nohibernate == 0 && !security_locked_down(LOCKDOWN_HIBERNATION);
 }
 
 /**
@@ -105,7 +106,7 @@ EXPORT_SYMBOL(system_entering_hibernation);
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
-	pr_info("hibernation debug: Waiting for 5 seconds.\n");
+	pr_info("debug: Waiting for 5 seconds.\n");
 	mdelay(5000);
 }
 
@@ -276,7 +277,7 @@ static int create_image(int platform_mode)
 
 	error = dpm_suspend_end(PMSG_FREEZE);
 	if (error) {
-		pr_err("Some devices failed to power down, aborting hibernation\n");
+		pr_err("Some devices failed to power down, aborting\n");
 		return error;
 	}
 
@@ -294,7 +295,7 @@ static int create_image(int platform_mode)
 
 	error = syscore_suspend();
 	if (error) {
-		pr_err("Some system devices failed to power down, aborting hibernation\n");
+		pr_err("Some system devices failed to power down, aborting\n");
 		goto Enable_irqs;
 	}
 
@@ -309,7 +310,7 @@ static int create_image(int platform_mode)
 	restore_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
-		pr_err("Error %d creating hibernation image\n", error);
+		pr_err("Error %d creating image\n", error);
 
 	if (!in_suspend) {
 		events_check_enabled = false;
@@ -679,7 +680,7 @@ static int load_image_and_restore(void)
 	if (!error)
 		hibernation_restore(flags & SF_PLATFORM_MODE);
 
-	pr_err("Failed to load hibernation image, recovering.\n");
+	pr_err("Failed to load image, recovering.\n");
 	swsusp_free();
 	free_basic_memory_bitmaps();
  Unlock:
@@ -742,7 +743,7 @@ int hibernate(void)
 		else
 			flags |= SF_CRC32_MODE;
 
-		pm_pr_dbg("Writing image.\n");
+		pm_pr_dbg("Writing hibernation image.\n");
 		error = swsusp_write(flags);
 		swsusp_free();
 		if (!error) {
@@ -754,7 +755,7 @@ int hibernate(void)
 		in_suspend = 0;
 		pm_restore_gfp_mask();
 	} else {
-		pm_pr_dbg("Image restored successfully.\n");
+		pm_pr_dbg("Hibernation image restored successfully.\n");
 	}
 
  Free_bitmaps:
@@ -893,7 +894,7 @@ static int software_resume(void)
 		goto Close_Finish;
 	}
 
-	pm_pr_dbg("Preparing processes for restore.\n");
+	pm_pr_dbg("Preparing processes for hibernation restore.\n");
 	error = freeze_processes();
 	if (error)
 		goto Close_Finish;
@@ -902,7 +903,7 @@ static int software_resume(void)
  Finish:
 	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
 	pm_restore_console();
-	pr_info("resume from hibernation failed (%d)\n", error);
+	pr_info("resume failed (%d)\n", error);
 	atomic_inc(&snapshot_device_available);
 	/* For success case, the suspend path will release the lock */
  Unlock:
@@ -1067,7 +1068,8 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	lock_system_sleep();
 	swsusp_resume_device = res;
 	unlock_system_sleep();
-	pm_pr_dbg("Configured resume from disk to %u\n", swsusp_resume_device);
+	pm_pr_dbg("Configured hibernation resume from disk to %u\n",
+		  swsusp_resume_device);
 	noresume = 0;
 	software_resume();
 	return n;
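The message shortening above works because pr_fmt() is prepended to every pr_*() format string in the file at compile time, so the "hibernation" qualifier only needs to appear once. Roughly:

	/* With this definition at the top of hibernate.c ... */
	#define pr_fmt(fmt) "PM: hibernation: " fmt

	/* ... a call such as */
	pr_err("Error %d creating image\n", error);

	/* expands along the lines of (see the pr_err()/pr_fmt() helpers in printk.h): */
	printk(KERN_ERR "PM: hibernation: " "Error %d creating image\n", error);
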
diff --git a/kernel/power/main.c b/kernel/power/main.c
index bdbd605c4215..69b7a8aeca3b 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
 
 #include "power.h"
 
@@ -189,6 +190,38 @@ static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr
 }
 
 power_attr(mem_sleep);
+
+/*
+ * sync_on_suspend: invoke ksys_sync_helper() before suspend.
+ *
+ * show() returns whether ksys_sync_helper() is invoked before suspend.
+ * store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it.
+ */
+bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);
+
+static ssize_t sync_on_suspend_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", sync_on_suspend_enabled);
+}
+
+static ssize_t sync_on_suspend_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buf, size_t n)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	if (val > 1)
+		return -EINVAL;
+
+	sync_on_suspend_enabled = !!val;
+	return n;
+}
+
+power_attr(sync_on_suspend);
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_PM_SLEEP_DEBUG
@@ -254,7 +287,6 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 power_attr(pm_test);
 #endif /* CONFIG_PM_SLEEP_DEBUG */
 
-#ifdef CONFIG_DEBUG_FS
 static char *suspend_step_name(enum suspend_stat_step step)
 {
 	switch (step) {
@@ -275,6 +307,92 @@ static char *suspend_step_name(enum suspend_stat_step step)
 	}
 }
 
+#define suspend_attr(_name)					\
+static ssize_t _name##_show(struct kobject *kobj,		\
+		struct kobj_attribute *attr, char *buf)		\
+{								\
+	return sprintf(buf, "%d\n", suspend_stats._name);	\
+}								\
+static struct kobj_attribute _name = __ATTR_RO(_name)
+
+suspend_attr(success);
+suspend_attr(fail);
+suspend_attr(failed_freeze);
+suspend_attr(failed_prepare);
+suspend_attr(failed_suspend);
+suspend_attr(failed_suspend_late);
+suspend_attr(failed_suspend_noirq);
+suspend_attr(failed_resume);
+suspend_attr(failed_resume_early);
+suspend_attr(failed_resume_noirq);
+
+static ssize_t last_failed_dev_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int index;
+	char *last_failed_dev = NULL;
+
+	index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+	index %= REC_FAILED_NUM;
+	last_failed_dev = suspend_stats.failed_devs[index];
+
+	return sprintf(buf, "%s\n", last_failed_dev);
+}
+static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);
+
+static ssize_t last_failed_errno_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int index;
+	int last_failed_errno;
+
+	index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
+	index %= REC_FAILED_NUM;
+	last_failed_errno = suspend_stats.errno[index];
+
+	return sprintf(buf, "%d\n", last_failed_errno);
+}
+static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
+
+static ssize_t last_failed_step_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int index;
+	enum suspend_stat_step step;
+	char *last_failed_step = NULL;
+
+	index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
+	index %= REC_FAILED_NUM;
+	step = suspend_stats.failed_steps[index];
+	last_failed_step = suspend_step_name(step);
+
+	return sprintf(buf, "%s\n", last_failed_step);
+}
+static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
+
+static struct attribute *suspend_attrs[] = {
+	&success.attr,
+	&fail.attr,
+	&failed_freeze.attr,
+	&failed_prepare.attr,
+	&failed_suspend.attr,
+	&failed_suspend_late.attr,
+	&failed_suspend_noirq.attr,
+	&failed_resume.attr,
+	&failed_resume_early.attr,
+	&failed_resume_noirq.attr,
+	&last_failed_dev.attr,
+	&last_failed_errno.attr,
+	&last_failed_step.attr,
+	NULL,
+};
+
+static struct attribute_group suspend_attr_group = {
+	.name = "suspend_stats",
+	.attrs = suspend_attrs,
+};
+
+#ifdef CONFIG_DEBUG_FS
 static int suspend_stats_show(struct seq_file *s, void *unused)
 {
 	int i, index, last_dev, last_errno, last_step;
@@ -495,7 +613,7 @@ static suspend_state_t decode_state(const char *buf, size_t n)
 	len = p ? p - buf : n;
 
 	/* Check hibernation first. */
-	if (len == 4 && !strncmp(buf, "disk", len))
+	if (len == 4 && str_has_prefix(buf, "disk"))
 		return PM_SUSPEND_MAX;
 
 #ifdef CONFIG_SUSPEND
@@ -769,6 +887,7 @@ static struct attribute * g[] = {
 	&wakeup_count_attr.attr,
 #ifdef CONFIG_SUSPEND
 	&mem_sleep_attr.attr,
+	&sync_on_suspend_attr.attr,
 #endif
 #ifdef CONFIG_PM_AUTOSLEEP
 	&autosleep_attr.attr,
@@ -794,6 +913,14 @@ static const struct attribute_group attr_group = {
 	.attrs = g,
 };
 
+static const struct attribute_group *attr_groups[] = {
+	&attr_group,
+#ifdef CONFIG_PM_SLEEP
+	&suspend_attr_group,
+#endif
+	NULL,
+};
+
 struct workqueue_struct *pm_wq;
 EXPORT_SYMBOL_GPL(pm_wq);
 
@@ -815,7 +942,7 @@ static int __init pm_init(void)
 	power_kobj = kobject_create_and_add("power", NULL);
 	if (!power_kobj)
 		return -ENOMEM;
-	error = sysfs_create_group(power_kobj, &attr_group);
+	error = sysfs_create_groups(power_kobj, attr_groups);
 	if (error)
 		return error;
 	pm_print_times_init();
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 44bee462ff57..7cdc64dc2373 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -179,7 +179,7 @@ extern void swsusp_close(fmode_t);
 extern int swsusp_unmark(void);
 #endif
 
-struct timeval;
+struct __kernel_old_timeval;
 /* kernel/power/swsusp.c */
 extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);
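The new sync_on_suspend attribute relies on the power_attr() helper, which pairs the _show()/_store() functions above with a sysfs file of the same name. Its definition in kernel/power/power.h is approximately (exact whitespace may differ):

	#define power_attr(_name) \
	static struct kobj_attribute _name##_attr = {	\
		.attr	= {				\
			.name = __stringify(_name),	\
			.mode = 0644,			\
		},					\
		.show	= _name##_show,			\
		.store	= _name##_store,		\
	}

So power_attr(sync_on_suspend) produces sync_on_suspend_attr, which the g[] array above then exposes as /sys/power/sync_on_suspend.
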
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 33e3febaba53..83edf8698118 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -78,57 +78,9 @@ static struct pm_qos_object cpu_dma_pm_qos = {
 	.name = "cpu_dma_latency",
 };
 
-static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
-static struct pm_qos_constraints network_lat_constraints = {
-	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
-	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
-	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
-	.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
-	.type = PM_QOS_MIN,
-	.notifiers = &network_lat_notifier,
-};
-static struct pm_qos_object network_lat_pm_qos = {
-	.constraints = &network_lat_constraints,
-	.name = "network_latency",
-};
-
-
-static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
-static struct pm_qos_constraints network_tput_constraints = {
-	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
-	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
-	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
-	.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
-	.type = PM_QOS_MAX,
-	.notifiers = &network_throughput_notifier,
-};
-static struct pm_qos_object network_throughput_pm_qos = {
-	.constraints = &network_tput_constraints,
-	.name = "network_throughput",
-};
-
-
-static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
-static struct pm_qos_constraints memory_bw_constraints = {
-	.list = PLIST_HEAD_INIT(memory_bw_constraints.list),
-	.target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
-	.default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
-	.no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
-	.type = PM_QOS_SUM,
-	.notifiers = &memory_bandwidth_notifier,
-};
-static struct pm_qos_object memory_bandwidth_pm_qos = {
-	.constraints = &memory_bw_constraints,
-	.name = "memory_bandwidth",
-};
-
-
 static struct pm_qos_object *pm_qos_array[] = {
 	&null_pm_qos,
 	&cpu_dma_pm_qos,
-	&network_lat_pm_qos,
-	&network_throughput_pm_qos,
-	&memory_bandwidth_pm_qos,
 };
 
 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
@@ -698,3 +650,251 @@ static int __init pm_qos_power_init(void)
 }
 
 late_initcall(pm_qos_power_init);
+
+/* Definitions related to the frequency QoS below. */
+
+/**
+ * freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void freq_constraints_init(struct freq_constraints *qos)
+{
+	struct pm_qos_constraints *c;
+
+	c = &qos->min_freq;
+	plist_head_init(&c->list);
+	c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+	c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+	c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+	c->type = PM_QOS_MAX;
+	c->notifiers = &qos->min_freq_notifiers;
+	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+
+	c = &qos->max_freq;
+	plist_head_init(&c->list);
+	c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+	c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+	c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+	c->type = PM_QOS_MIN;
+	c->notifiers = &qos->max_freq_notifiers;
+	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+}
+
+/**
+ * freq_qos_read_value - Get frequency QoS constraint for a given list.
+ * @qos: Constraints to evaluate.
+ * @type: QoS request type.
+ */
+s32 freq_qos_read_value(struct freq_constraints *qos,
+			enum freq_qos_req_type type)
+{
+	s32 ret;
+
+	switch (type) {
+	case FREQ_QOS_MIN:
+		ret = IS_ERR_OR_NULL(qos) ?
+			FREQ_QOS_MIN_DEFAULT_VALUE :
+			pm_qos_read_value(&qos->min_freq);
+		break;
+	case FREQ_QOS_MAX:
+		ret = IS_ERR_OR_NULL(qos) ?
+			FREQ_QOS_MAX_DEFAULT_VALUE :
+			pm_qos_read_value(&qos->max_freq);
+		break;
+	default:
+		WARN_ON(1);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ *
+ * This is only meant to be called from inside pm_qos, not drivers.
+ */
+int freq_qos_apply(struct freq_qos_request *req,
+		   enum pm_qos_req_action action, s32 value)
+{
+	int ret;
+
+	switch (req->type) {
+	case FREQ_QOS_MIN:
+		ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
+					   action, value);
+		break;
+	case FREQ_QOS_MAX:
+		ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
+					   action, value);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object. The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_add_request(struct freq_constraints *qos,
+			 struct freq_qos_request *req,
+			 enum freq_qos_req_type type, s32 value)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qos) || !req)
+		return -EINVAL;
+
+	if (WARN(freq_qos_request_active(req),
+		 "%s() called for active request\n", __func__))
+		return -EINVAL;
+
+	req->qos = qos;
+	req->type = type;
+	ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+	if (ret < 0) {
+		req->qos = NULL;
+		req->type = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_request);
+
+/**
+ * freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+{
+	if (!req)
+		return -EINVAL;
+
+	if (WARN(!freq_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (req->pnode.prio == new_value)
+		return 0;
+
+	return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(freq_qos_update_request);
+
+/**
+ * freq_qos_remove_request - Remove frequency QoS request from its list.
+ * @req: Request to remove.
+ *
+ * Remove the given frequency QoS request from the list of constraints it
+ * belongs to and recompute the effective constraint value for that list.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_remove_request(struct freq_qos_request *req)
+{
+	int ret;
+
+	if (!req)
+		return -EINVAL;
+
+	if (WARN(!freq_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	req->qos = NULL;
+	req->type = 0;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_request);
+
+/**
+ * freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int freq_qos_add_notifier(struct freq_constraints *qos,
+			  enum freq_qos_req_type type,
+			  struct notifier_block *notifier)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qos) || !notifier)
+		return -EINVAL;
+
+	switch (type) {
+	case FREQ_QOS_MIN:
+		ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
+						       notifier);
+		break;
+	case FREQ_QOS_MAX:
+		ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
+						       notifier);
+		break;
+	default:
+		WARN_ON(1);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
+
+/**
+ * freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+			     enum freq_qos_req_type type,
+			     struct notifier_block *notifier)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qos) || !notifier)
+		return -EINVAL;
+
+	switch (type) {
+	case FREQ_QOS_MIN:
+		ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
+							 notifier);
+		break;
+	case FREQ_QOS_MAX:
+		ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
+							 notifier);
+		break;
+	default:
+		WARN_ON(1);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
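A consumer of the new interface typically embeds a struct freq_qos_request and drives it through the add/update/remove calls above. A minimal sketch, assuming a struct freq_constraints *qos obtained from elsewhere (the my_* names are illustrative, not part of this patch):

	#include <linux/pm_qos.h>

	/* Hypothetical driver state holding one minimum-frequency request. */
	struct my_freq_limiter {
		struct freq_qos_request req;
	};

	static int my_limiter_start(struct my_freq_limiter *lim,
				    struct freq_constraints *qos)
	{
		/* Request a frequency floor; units are the caller's convention. */
		return freq_qos_add_request(qos, &lim->req, FREQ_QOS_MIN, 800000);
	}

	static int my_limiter_adjust(struct my_freq_limiter *lim, s32 new_min)
	{
		/* Returns 1 if the effective constraint changed, 0 if not. */
		return freq_qos_update_request(&lim->req, new_min);
	}

	static void my_limiter_stop(struct my_freq_limiter *lim)
	{
		freq_qos_remove_request(&lim->req);
	}
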
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 83105874f255..ddade80ad276 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -8,7 +8,7 @@
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  */
 
-#define pr_fmt(fmt) "PM: " fmt
+#define pr_fmt(fmt) "PM: hibernation: " fmt
 
 #include <linux/version.h>
 #include <linux/module.h>
@@ -734,8 +734,15 @@ zone_found:
 	 * We have found the zone. Now walk the radix tree to find the leaf node
 	 * for our PFN.
 	 */
+
+	/*
+	 * If the zone we wish to scan is the current zone and the
+	 * pfn falls into the current node then we do not need to walk
+	 * the tree.
+	 */
 	node = bm->cur.node;
-	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+	if (zone == bm->cur.zone &&
+	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
 		goto node_found;
 
 	node = zone->rtree;
@@ -1140,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
 
 void clear_free_pages(void)
 {
-#ifdef CONFIG_PAGE_POISONING_ZERO
 	struct memory_bitmap *bm = free_pages_map;
 	unsigned long pfn;
 
 	if (WARN_ON(!(free_pages_map)))
 		return;
 
-	memory_bm_position_reset(bm);
-	pfn = memory_bm_next_pfn(bm);
-	while (pfn != BM_END_OF_MAP) {
-		if (pfn_valid(pfn))
-			clear_highpage(pfn_to_page(pfn));
-
+	if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+		memory_bm_position_reset(bm);
 		pfn = memory_bm_next_pfn(bm);
+		while (pfn != BM_END_OF_MAP) {
+			if (pfn_valid(pfn))
+				clear_highpage(pfn_to_page(pfn));
+
+			pfn = memory_bm_next_pfn(bm);
+		}
+		memory_bm_position_reset(bm);
+		pr_info("free pages cleared after restore\n");
 	}
-	memory_bm_position_reset(bm);
-	pr_info("free pages cleared after restore\n");
-#endif /* PAGE_POISONING_ZERO */
 }
 
 /**
@@ -1559,9 +1566,7 @@ static unsigned long preallocate_image_highmem(unsigned long nr_pages)
  */
 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
 {
-	x *= multiplier;
-	do_div(x, base);
-	return (unsigned long)x;
+	return div64_u64(x * multiplier, base);
 }
 
 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
@@ -1698,16 +1703,20 @@ int hibernate_preallocate_memory(void)
 	ktime_t start, stop;
 	int error;
 
-	pr_info("Preallocating image memory... ");
+	pr_info("Preallocating image memory\n");
 	start = ktime_get();
 
 	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
-	if (error)
+	if (error) {
+		pr_err("Cannot allocate original bitmap\n");
 		goto err_out;
+	}
 
 	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
-	if (error)
+	if (error) {
+		pr_err("Cannot allocate copy bitmap\n");
 		goto err_out;
+	}
 
 	alloc_normal = 0;
 	alloc_highmem = 0;
@@ -1797,8 +1806,11 @@ int hibernate_preallocate_memory(void)
 		alloc -= pages;
 		pages += pages_highmem;
 		pages_highmem = preallocate_image_highmem(alloc);
-		if (pages_highmem < alloc)
+		if (pages_highmem < alloc) {
+			pr_err("Image allocation is %lu pages short\n",
+			       alloc - pages_highmem);
 			goto err_out;
+		}
 		pages += pages_highmem;
 		/*
 		 * size is the desired number of saveable pages to leave in
@@ -1829,13 +1841,12 @@ int hibernate_preallocate_memory(void)
 
  out:
 	stop = ktime_get();
-	pr_cont("done (allocated %lu pages)\n", pages);
+	pr_info("Allocated %lu pages for snapshot\n", pages);
 	swsusp_show_speed(start, stop, pages, "Allocated");
 
 	return 0;
 
  err_out:
-	pr_cont("\n");
 	swsusp_free();
 	return -ENOMEM;
 }
@@ -1969,7 +1980,7 @@ asmlinkage __visible int swsusp_save(void)
 {
 	unsigned int nr_pages, nr_highmem;
 
-	pr_info("Creating hibernation image:\n");
+	pr_info("Creating image:\n");
 
 	drain_local_pages(NULL);
 	nr_pages = count_data_pages();
@@ -2003,7 +2014,7 @@ asmlinkage __visible int swsusp_save(void)
 	nr_copy_pages = nr_pages;
 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
-	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
+	pr_info("Image created (%d pages copied)\n", nr_pages);
 
 	return 0;
 }
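The __fraction() rewrite folds the multiply and the 64-by-64 division into div64_u64() from <linux/math64.h>, which, unlike do_div(), takes a 64-bit divisor and returns the quotient rather than modifying its argument in place. Illustratively:

	#include <linux/math64.h>

	/* Old style: do_div() divides in place and takes only a 32-bit divisor. */
	static unsigned long fraction_old(u64 x, u64 multiplier, u32 base)
	{
		x *= multiplier;
		do_div(x, base);		/* x becomes the quotient */
		return (unsigned long)x;
	}

	/* New style: div64_u64() accepts a full 64-bit divisor. */
	static unsigned long fraction_new(u64 x, u64 multiplier, u64 base)
	{
		return div64_u64(x * multiplier, base);
	}
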
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c874a7026e24..8b1bb5ee7e5d 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -121,43 +121,26 @@ static void s2idle_loop(void)
 {
 	pm_pr_dbg("suspend-to-idle\n");
 
+	/*
+	 * Suspend-to-idle equals:
+	 * frozen processes + suspended devices + idle processors.
+	 * Thus s2idle_enter() should be called right after all devices have
+	 * been suspended.
+	 *
+	 * Wakeups during the noirq suspend of devices may be spurious, so try
+	 * to avoid them upfront.
+	 */
 	for (;;) {
-		int error;
-
-		dpm_noirq_begin();
-
-		/*
-		 * Suspend-to-idle equals
-		 * frozen processes + suspended devices + idle processors.
-		 * Thus s2idle_enter() should be called right after
-		 * all devices have been suspended.
-		 *
-		 * Wakeups during the noirq suspend of devices may be spurious,
-		 * so prevent them from terminating the loop right away.
-		 */
-		error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
-		if (!error)
-			s2idle_enter();
-		else if (error == -EBUSY && pm_wakeup_pending())
-			error = 0;
-
-		if (!error && s2idle_ops && s2idle_ops->wake)
-			s2idle_ops->wake();
-
-		dpm_noirq_resume_devices(PMSG_RESUME);
-
-		dpm_noirq_end();
-
-		if (error)
-			break;
-
-		if (s2idle_ops && s2idle_ops->sync)
-			s2idle_ops->sync();
-
-		if (pm_wakeup_pending())
+		if (s2idle_ops && s2idle_ops->wake) {
+			if (s2idle_ops->wake())
+				break;
+		} else if (pm_wakeup_pending()) {
 			break;
+		}
 
 		pm_wakeup_clear(false);
+
+		s2idle_enter();
 	}
 
 	pm_pr_dbg("resume from suspend-to-idle\n");
@@ -271,14 +254,21 @@ static int platform_suspend_prepare_late(suspend_state_t state)
 
 static int platform_suspend_prepare_noirq(suspend_state_t state)
 {
-	return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare_late ?
-		suspend_ops->prepare_late() : 0;
+	if (state == PM_SUSPEND_TO_IDLE)
+		return s2idle_ops && s2idle_ops->prepare_late ?
+			s2idle_ops->prepare_late() : 0;
+
+	return suspend_ops->prepare_late ? suspend_ops->prepare_late() : 0;
 }
 
 static void platform_resume_noirq(suspend_state_t state)
 {
-	if (state != PM_SUSPEND_TO_IDLE && suspend_ops->wake)
+	if (state == PM_SUSPEND_TO_IDLE) {
+		if (s2idle_ops && s2idle_ops->restore_early)
+			s2idle_ops->restore_early();
+	} else if (suspend_ops->wake) {
 		suspend_ops->wake();
+	}
 }
 
 static void platform_resume_early(suspend_state_t state)
@@ -415,11 +405,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	if (error)
 		goto Devices_early_resume;
 
-	if (state == PM_SUSPEND_TO_IDLE && pm_test_level != TEST_PLATFORM) {
-		s2idle_loop();
-		goto Platform_early_resume;
-	}
-
 	error = dpm_suspend_noirq(PMSG_SUSPEND);
 	if (error) {
 		pr_err("noirq suspend of devices failed\n");
@@ -432,6 +417,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	if (suspend_test(TEST_PLATFORM))
 		goto Platform_wake;
 
+	if (state == PM_SUSPEND_TO_IDLE) {
+		s2idle_loop();
+		goto Platform_wake;
+	}
+
 	error = suspend_disable_secondary_cpus();
 	if (error || suspend_test(TEST_CPUS))
 		goto Enable_cpus;
@@ -575,7 +565,7 @@ static int enter_state(suspend_state_t state)
 	if (state == PM_SUSPEND_TO_IDLE)
 		s2idle_begin();
 
-	if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
+	if (sync_on_suspend_enabled) {
 		trace_suspend_resume(TPS("sync_filesystems"), 0, true);
 		ksys_sync_helper();
 		trace_suspend_resume(TPS("sync_filesystems"), 0, false);
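The reworked loop expects s2idle_ops->wake() to return a truth value: nonzero means a genuine wakeup was detected and the loop should exit, zero means the event was spurious and s2idle_enter() should be retried. A hedged sketch of what a platform hook might look like, using the platform_s2idle_ops structure from <linux/suspend.h> (the my_platform names and helper are hypothetical):

	#include <linux/suspend.h>

	/* Illustrative only: report whether the event that ended s2idle is real. */
	static bool my_platform_s2idle_wake(void)
	{
		/*
		 * Inspect platform wakeup status here; returning false tells
		 * s2idle_loop() to treat the event as spurious and re-enter idle.
		 */
		return my_platform_wake_event_pending();	/* hypothetical helper */
	}

	static const struct platform_s2idle_ops my_platform_s2idle_ops = {
		.wake = my_platform_s2idle_wake,
	};
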
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 60564b58de07..e1ed58adb69e 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -70,7 +70,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
 	static char info_test[] __initdata =
 		KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
 
-	unsigned long now;
+	time64_t now;
 	struct rtc_wkalrm alm;
 	int status;
 
@@ -81,10 +81,10 @@ repeat:
 		printk(err_readtime, dev_name(&rtc->dev), status);
 		return;
 	}
-	rtc_tm_to_time(&alm.time, &now);
+	now = rtc_tm_to_time64(&alm.time);
 
 	memset(&alm, 0, sizeof alm);
-	rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
+	rtc_time64_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
 	alm.enabled = true;
 
 	status = rtc_set_alarm(rtc, &alm);
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 4210152e56f0..105df4dfc783 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(wakelocks_lock);
 struct wakelock {
 	char			*name;
 	struct rb_node		node;
-	struct wakeup_source	ws;
+	struct wakeup_source	*ws;
 #ifdef CONFIG_PM_WAKELOCKS_GC
 	struct list_head	lru;
 #endif
@@ -46,7 +46,7 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
 
 	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
 		wl = rb_entry(node, struct wakelock, node);
-		if (wl->ws.active == show_active)
+		if (wl->ws->active == show_active)
 			str += scnprintf(str, end - str, "%s ", wl->name);
 	}
 	if (str > buf)
@@ -112,16 +112,16 @@ static void __wakelocks_gc(struct work_struct *work)
 		u64 idle_time_ns;
 		bool active;
 
-		spin_lock_irq(&wl->ws.lock);
-		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
-		active = wl->ws.active;
-		spin_unlock_irq(&wl->ws.lock);
+		spin_lock_irq(&wl->ws->lock);
+		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));
+		active = wl->ws->active;
+		spin_unlock_irq(&wl->ws->lock);
 
 		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
 			break;
 
 		if (!active) {
-			wakeup_source_remove(&wl->ws);
+			wakeup_source_unregister(wl->ws);
 			rb_erase(&wl->node, &wakelocks_tree);
 			list_del(&wl->lru);
 			kfree(wl->name);
@@ -187,9 +187,15 @@ static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
 			kfree(wl);
 			return ERR_PTR(-ENOMEM);
 		}
-		wl->ws.name = wl->name;
-		wl->ws.last_time = ktime_get();
-		wakeup_source_add(&wl->ws);
+
+		wl->ws = wakeup_source_register(NULL, wl->name);
+		if (!wl->ws) {
+			kfree(wl->name);
+			kfree(wl);
+			return ERR_PTR(-ENOMEM);
+		}
+		wl->ws->last_time = ktime_get();
+
 		rb_link_node(&wl->node, parent, node);
 		rb_insert_color(&wl->node, &wakelocks_tree);
 		wakelocks_lru_add(wl);
@@ -233,9 +239,9 @@ int pm_wake_lock(const char *buf)
 		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;
 
 		do_div(timeout_ms, NSEC_PER_MSEC);
-		__pm_wakeup_event(&wl->ws, timeout_ms);
+		__pm_wakeup_event(wl->ws, timeout_ms);
 	} else {
-		__pm_stay_awake(&wl->ws);
+		__pm_stay_awake(wl->ws);
 	}
 
 	wakelocks_lru_most_recent(wl);
@@ -271,7 +277,7 @@ int pm_wake_unlock(const char *buf)
 		ret = PTR_ERR(wl);
 		goto out;
 	}
-	__pm_relax(&wl->ws);
+	__pm_relax(wl->ws);
 
 	wakelocks_lru_most_recent(wl);
 	wakelocks_gc();
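The wakelock conversion from an embedded struct wakeup_source to a pointer matters because wakeup_source_register() now both allocates and adds the source, and wakeup_source_unregister() tears it down in one call, replacing the old wakeup_source_add()/wakeup_source_remove() pairing. A sketch of the resulting lifecycle, with hypothetical names:

	#include <linux/pm_wakeup.h>

	static struct wakeup_source *my_ws;

	static int my_lock_init(void)
	{
		my_ws = wakeup_source_register(NULL, "my_lock");	/* allocate + add */
		return my_ws ? 0 : -ENOMEM;
	}

	static void my_lock_hold(void)
	{
		__pm_stay_awake(my_ws);		/* block system suspend */
	}

	static void my_lock_release(void)
	{
		__pm_relax(my_ws);		/* allow suspend again */
	}

	static void my_lock_exit(void)
	{
		wakeup_source_unregister(my_ws);	/* remove + free */
	}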