Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_idle.c      |   7
-rw-r--r--  drivers/base/cpu.c                 |   1
-rw-r--r--  drivers/base/power/clock_ops.c     |  13
-rw-r--r--  drivers/base/power/common.c        |   2
-rw-r--r--  drivers/base/power/domain.c        |  13
-rw-r--r--  drivers/base/power/main.c          |  11
-rw-r--r--  drivers/base/power/runtime.c       |  72
-rw-r--r--  drivers/base/power/sysfs.c         |  17
-rw-r--r--  drivers/base/power/wakeup.c        |   2
-rw-r--r--  drivers/cpuidle/Kconfig            |  11
-rw-r--r--  drivers/cpuidle/dt_idle_states.c   |  15
-rw-r--r--  drivers/cpuidle/governors/Makefile |   1
-rw-r--r--  drivers/cpuidle/governors/teo.c    | 444
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c    |  16
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h    |   4
-rw-r--r--  drivers/idle/intel_idle.c          |   1
-rw-r--r--  drivers/powercap/intel_rapl.c      |   2
17 files changed, 572 insertions, 60 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b2131c4ea124..98d4ec5bf450 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -282,6 +282,13 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
                           pr->power.states[ACPI_STATE_C2].address,
                           pr->power.states[ACPI_STATE_C3].address));
 
+        snprintf(pr->power.states[ACPI_STATE_C2].desc,
+                 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
+                 pr->power.states[ACPI_STATE_C2].address);
+        snprintf(pr->power.states[ACPI_STATE_C3].desc,
+                 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
+                 pr->power.states[ACPI_STATE_C3].address);
+
         return 0;
 }
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index eb9443d5bae1..6ce93a52bf3f 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -427,6 +427,7 @@ __cpu_device_create(struct device *parent, void *drvdata,
         dev->parent = parent;
         dev->groups = groups;
         dev->release = device_create_release;
+        device_set_pm_not_required(dev);
         dev_set_drvdata(dev, drvdata);
 
         retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5a42ae4078c2..365ad751ce0f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
         if (IS_ERR(ce->clk)) {
                 ce->status = PCE_STATUS_ERROR;
         } else {
-                clk_prepare(ce->clk);
-                ce->status = PCE_STATUS_ACQUIRED;
-                dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
-                        ce->clk, ce->con_id);
+                if (clk_prepare(ce->clk)) {
+                        ce->status = PCE_STATUS_ERROR;
+                        dev_err(dev, "clk_prepare() failed\n");
+                } else {
+                        ce->status = PCE_STATUS_ACQUIRED;
+                        dev_dbg(dev,
+                                "Clock %pC con_id %s managed by runtime PM.\n",
+                                ce->clk, ce->con_id);
+                }
         }
 }
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b413951c6abc..22aedb28aad7 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
  * For a detailed function description, see dev_pm_domain_attach_by_id().
  */
 struct device *dev_pm_domain_attach_by_name(struct device *dev,
-                                            char *name)
+                                            const char *name)
 {
         if (dev->pm_domain)
                 return ERR_PTR(-EEXIST);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 500de1dee967..2c334c01fc43 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
  * power-domain-names DT property. For further description see
  * genpd_dev_pm_attach_by_id().
  */
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
 {
         int index;
 
@@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void)
 
         genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
 
-        if (!genpd_debugfs_dir)
-                return -ENOMEM;
-
-        d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
-                        genpd_debugfs_dir, NULL, &summary_fops);
-        if (!d)
-                return -ENOMEM;
+        debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+                            NULL, &summary_fops);
 
         list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
                 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
-                if (!d)
-                        return -ENOMEM;
 
                 debugfs_create_file("current_state", 0444,
                                 d, genpd, &status_fops);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0992e67e862b..893ae464bfd6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -124,6 +124,10 @@ void device_pm_unlock(void)
  */
 void device_pm_add(struct device *dev)
 {
+        /* Skip PM setup/initialization. */
+        if (device_pm_not_required(dev))
+                return;
+
         pr_debug("PM: Adding info for %s:%s\n",
                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
         device_pm_check_callbacks(dev);
@@ -142,6 +146,9 @@ void device_pm_add(struct device *dev)
  */
 void device_pm_remove(struct device *dev)
 {
+        if (device_pm_not_required(dev))
+                return;
+
         pr_debug("PM: Removing info for %s:%s\n",
                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
         complete_all(&dev->power.completion);
@@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
         if (dev->power.direct_complete) {
                 if (pm_runtime_status_suspended(dev)) {
                         pm_runtime_disable(dev);
-                        if (pm_runtime_status_suspended(dev))
+                        if (pm_runtime_status_suspended(dev)) {
+                                pm_dev_dbg(dev, state, "direct-complete ");
                                 goto Complete;
+                        }
 
                         pm_runtime_enable(dev);
                 }
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0ea2139c50d8..78937c45278c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags);
  */
 void update_pm_runtime_accounting(struct device *dev)
 {
-        unsigned long now = jiffies;
-        unsigned long delta;
+        u64 now, last, delta;
 
-        delta = now - dev->power.accounting_timestamp;
+        if (dev->power.disable_depth > 0)
+                return;
+
+        last = dev->power.accounting_timestamp;
 
+        now = ktime_get_mono_fast_ns();
         dev->power.accounting_timestamp = now;
 
-        if (dev->power.disable_depth > 0)
+        /*
+         * Because ktime_get_mono_fast_ns() is not monotonic during
+         * timekeeping updates, ensure that 'now' is after the last saved
+         * timestamp.
+         */
+        if (now < last)
                 return;
 
+        delta = now - last;
+
         if (dev->power.runtime_status == RPM_SUSPENDED)
-                dev->power.suspended_jiffies += delta;
+                dev->power.suspended_time += delta;
         else
-                dev->power.active_jiffies += delta;
+                dev->power.active_time += delta;
 }
 
 static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
         dev->power.runtime_status = status;
 }
 
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+        u64 time;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dev->power.lock, flags);
+
+        update_pm_runtime_accounting(dev);
+        time = dev->power.suspended_time;
+
+        spin_unlock_irqrestore(&dev->power.lock, flags);
+
+        return time;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  * @dev: Device to handle.
@@ -95,7 +121,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
  */
 static void pm_runtime_deactivate_timer(struct device *dev)
 {
         if (dev->power.timer_expires > 0) {
-                hrtimer_cancel(&dev->power.suspend_timer);
+                hrtimer_try_to_cancel(&dev->power.suspend_timer);
                 dev->power.timer_expires = 0;
         }
 }
@@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev)
 u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
         int autosuspend_delay;
-        u64 last_busy, expires = 0;
-        u64 now = ktime_get_mono_fast_ns();
+        u64 expires;
 
         if (!dev->power.use_autosuspend)
-                goto out;
+                return 0;
 
         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
         if (autosuspend_delay < 0)
-                goto out;
-
-        last_busy = READ_ONCE(dev->power.last_busy);
+                return 0;
 
-        expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
-        if (expires <= now)
-                expires = 0;        /* Already expired. */
+        expires  = READ_ONCE(dev->power.last_busy);
+        expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+        if (expires > ktime_get_mono_fast_ns())
+                return expires;        /* Expires in the future */
 
- out:
-        return expires;
+        return 0;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 
@@ -1276,6 +1299,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
                 pm_runtime_put_noidle(dev);
         }
 
+        /* Update time accounting before disabling PM-runtime. */
+        update_pm_runtime_accounting(dev);
+
         if (!dev->power.disable_depth++)
                 __pm_runtime_barrier(dev);
 
@@ -1294,10 +1320,15 @@ void pm_runtime_enable(struct device *dev)
 
         spin_lock_irqsave(&dev->power.lock, flags);
 
-        if (dev->power.disable_depth > 0)
+        if (dev->power.disable_depth > 0) {
                 dev->power.disable_depth--;
-        else
+
+                /* About to enable runtime pm, set accounting_timestamp to now */
+                if (!dev->power.disable_depth)
+                        dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+        } else {
                 dev_warn(dev, "Unbalanced %s!\n", __func__);
+        }
 
         WARN(!dev->power.disable_depth &&
              dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1494,7 +1525,6 @@ void pm_runtime_init(struct device *dev)
         dev->power.request_pending = false;
         dev->power.request = RPM_REQ_NONE;
         dev->power.deferred_resume = false;
-        dev->power.accounting_timestamp = jiffies;
         INIT_WORK(&dev->power.work, pm_runtime_work);
 
         dev->power.timer_expires = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d713738ce796..c6bf76124184 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
 {
         int ret;
+        u64 tmp;
 
         spin_lock_irq(&dev->power.lock);
         update_pm_runtime_accounting(dev);
-        ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+        tmp = dev->power.active_time;
+        do_div(tmp, NSEC_PER_MSEC);
+        ret = sprintf(buf, "%llu\n", tmp);
         spin_unlock_irq(&dev->power.lock);
         return ret;
 }
@@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
 {
         int ret;
+        u64 tmp;
 
         spin_lock_irq(&dev->power.lock);
         update_pm_runtime_accounting(dev);
-        ret = sprintf(buf, "%i\n",
-                jiffies_to_msecs(dev->power.suspended_jiffies));
+        tmp = dev->power.suspended_time;
+        do_div(tmp, NSEC_PER_MSEC);
+        ret = sprintf(buf, "%llu\n", tmp);
         spin_unlock_irq(&dev->power.lock);
         return ret;
 }
@@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev)
 {
         int rc;
 
+        /* No need to create PM sysfs if explicitly disabled. */
+        if (device_pm_not_required(dev))
+                return 0;
+
         rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
         if (rc)
                 return rc;
@@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+        if (device_pm_not_required(dev))
+                return;
         sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
         dev_pm_qos_constraints_destroy(dev);
         rpm_sysfs_remove(dev);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fa1898755a3..f1fee72ed970 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
 
 /**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
  * @dev: Device the wakeup event is related to.
  * @msec: Anticipated event processing time (in milliseconds).
  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
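Note on the runtime-PM changes above: time accounting moves from jiffies to ktime_get_mono_fast_ns(), while the runtime_active_time and runtime_suspended_time sysfs attributes keep reporting milliseconds (the do_div() by NSEC_PER_MSEC in sysfs.c). A minimal userspace sketch for reading the two attributes follows; the PCI device path is an illustrative assumption, substitute any device with runtime PM enabled:

/* Sketch: read a device's runtime PM accounting from sysfs.
 * Both attributes report milliseconds; after this series the kernel
 * derives them from ktime_get_mono_fast_ns() instead of jiffies.
 * The sysfs base path below is a placeholder assumption.
 */
#include <stdio.h>

static long long read_ms(const char *path)
{
        long long ms = -1;
        FILE *f = fopen(path, "r");

        if (!f)
                return -1;
        if (fscanf(f, "%lld", &ms) != 1)
                ms = -1;
        fclose(f);
        return ms;
}

int main(void)
{
        const char *base = "/sys/bus/pci/devices/0000:00:02.0/power";
        char path[256];

        snprintf(path, sizeof(path), "%s/runtime_active_time", base);
        long long active = read_ms(path);

        snprintf(path, sizeof(path), "%s/runtime_suspended_time", base);
        long long suspended = read_ms(path);

        printf("active: %lld ms, suspended: %lld ms\n", active, suspended);
        return 0;
}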
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 7e48eb5bf0a7..8caccbbd7353 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -4,7 +4,7 @@ config CPU_IDLE
         bool "CPU idle PM support"
         default y if ACPI || PPC_PSERIES
         select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
-        select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE)
+        select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) && !CPU_IDLE_GOV_TEO
         help
           CPU idle is a generic framework for supporting software-controlled
           idle processor power management. It includes modular cross-platform
@@ -23,6 +23,15 @@ config CPU_IDLE_GOV_LADDER
 config CPU_IDLE_GOV_MENU
         bool "Menu governor (for tickless system)"
 
+config CPU_IDLE_GOV_TEO
+        bool "Timer events oriented (TEO) governor (for tickless systems)"
+        help
+          This governor implements a simplified idle state selection method
+          focused on timer events and does not do any interactivity boosting.
+
+          Some workloads benefit from using it and it generally should be safe
+          to use. Say Y here if you are not happy with the alternatives.
+
 config DT_IDLE_STATES
         bool
 
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 53342b7f1010..add9569636b5 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -22,16 +22,12 @@
 #include "dt_idle_states.h"
 
 static int init_state_node(struct cpuidle_state *idle_state,
-                           const struct of_device_id *matches,
+                           const struct of_device_id *match_id,
                            struct device_node *state_node)
 {
         int err;
-        const struct of_device_id *match_id;
         const char *desc;
 
-        match_id = of_match_node(matches, state_node);
-        if (!match_id)
-                return -ENODEV;
 
         /*
          * CPUidle drivers are expected to initialize the const void *data
          * pointer of the passed in struct of_device_id array to the idle
@@ -160,6 +156,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 {
         struct cpuidle_state *idle_state;
         struct device_node *state_node, *cpu_node;
+        const struct of_device_id *match_id;
         int i, err = 0;
         const cpumask_t *cpumask;
         unsigned int state_idx = start_idx;
@@ -180,6 +177,12 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
                 if (!state_node)
                         break;
 
+                match_id = of_match_node(matches, state_node);
+                if (!match_id) {
+                        err = -ENODEV;
+                        break;
+                }
+
                 if (!of_device_is_available(state_node)) {
                         of_node_put(state_node);
                         continue;
@@ -198,7 +201,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
                 }
 
                 idle_state = &drv->states[state_idx++];
-                err = init_state_node(idle_state, matches, state_node);
+                err = init_state_node(idle_state, match_id, state_node);
                 if (err) {
                         pr_err("Parsing idle state node %pOF failed with err %d\n",
                                state_node, err);
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
index 1b512722689f..4d8aff5248a8 100644
--- a/drivers/cpuidle/governors/Makefile
+++ b/drivers/cpuidle/governors/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
 obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
+obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
new file mode 100644
index 000000000000..7d05efdbd3c6
--- /dev/null
+++ b/drivers/cpuidle/governors/teo.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer events oriented CPU idle governor
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * The idea of this governor is based on the observation that on many systems
+ * timer events are two or more orders of magnitude more frequent than any
+ * other interrupts, so they are likely to be the most significant source of
+ * CPU wakeups from idle states.  Moreover, information about what happened in
+ * the (relatively recent) past can be used to estimate whether or not the
+ * deepest idle state with target residency within the time to the closest
+ * timer is likely to be suitable for the upcoming idle time of the CPU and,
+ * if not, then which of the shallower idle states to choose.
+ *
+ * Of course, non-timer wakeup sources are more important in some use cases
+ * and they can be covered by taking a few most recent idle time intervals of
+ * the CPU into account.  However, even in that case it is not necessary to
+ * consider idle duration values greater than the time till the closest timer,
+ * as the patterns that they may belong to produce average values close enough
+ * to the time till the closest timer (sleep length) anyway.
+ *
+ * Thus this governor estimates whether or not the upcoming idle time of the
+ * CPU is likely to be significantly shorter than the sleep length and selects
+ * an idle state for it in accordance with that, as follows:
+ *
+ * - Find an idle state on the basis of the sleep length and state statistics
+ *   collected over time:
+ *
+ *   o Find the deepest idle state whose target residency is less than or
+ *     equal to the sleep length.
+ *
+ *   o Select it if it matched both the sleep length and the observed idle
+ *     duration in the past more often than it matched the sleep length alone
+ *     (i.e. the observed idle duration was significantly shorter than the
+ *     sleep length matched by it).
+ *
+ *   o Otherwise, select the shallower state with the greatest matched "early"
+ *     wakeups metric.
+ *
+ * - If the majority of the most recent idle duration values are below the
+ *   target residency of the idle state selected so far, use those values to
+ *   compute the new expected idle duration and find an idle state matching it
+ *   (which has to be shallower than the one selected so far).
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched/clock.h>
+#include <linux/tick.h>
+
+/*
+ * The PULSE value is added to metrics when they grow and the DECAY_SHIFT
+ * value is used for decreasing metrics on a regular basis.
+ */
+#define PULSE                1024
+#define DECAY_SHIFT        3
+
+/*
+ * Number of the most recent idle duration values to take into consideration
+ * for the detection of wakeup patterns.
+ */
+#define INTERVALS        8
+
+/**
+ * struct teo_idle_state - Idle state data used by the TEO cpuidle governor.
+ * @early_hits: "Early" CPU wakeups "matching" this state.
+ * @hits: "On time" CPU wakeups "matching" this state.
+ * @misses: CPU wakeups "missing" this state.
+ *
+ * A CPU wakeup is "matched" by a given idle state if the idle duration
+ * measured after the wakeup is between the target residency of that state and
+ * the target residency of the next one (or if this is the deepest available
+ * idle state, it "matches" a CPU wakeup when the measured idle duration is at
+ * least equal to its target residency).
+ *
+ * Also, from the TEO governor perspective, a CPU wakeup from idle is "early"
+ * if it occurs significantly earlier than the closest expected timer event
+ * (that is, early enough to match an idle state shallower than the one
+ * matching the time till the closest timer event).  Otherwise, the wakeup is
+ * "on time", or it is a "hit".
+ *
+ * A "miss" occurs when the given state doesn't match the wakeup, but it
+ * matches the time till the closest timer event used for idle state
+ * selection.
+ */
+struct teo_idle_state {
+        unsigned int early_hits;
+        unsigned int hits;
+        unsigned int misses;
+};
+
+/**
+ * struct teo_cpu - CPU data used by the TEO cpuidle governor.
+ * @time_span_ns: Time between idle state selection and post-wakeup update.
+ * @sleep_length_ns: Time till the closest timer event (at the selection time).
+ * @states: Idle states data corresponding to this CPU.
+ * @last_state: Idle state entered by the CPU last time.
+ * @interval_idx: Index of the most recent saved idle interval.
+ * @intervals: Saved idle duration values.
+ */
+struct teo_cpu {
+        u64 time_span_ns;
+        u64 sleep_length_ns;
+        struct teo_idle_state states[CPUIDLE_STATE_MAX];
+        int last_state;
+        int interval_idx;
+        unsigned int intervals[INTERVALS];
+};
+
+static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
+
+/**
+ * teo_update - Update CPU data after wakeup.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ */
+static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+        struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+        unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
+        int i, idx_hit = -1, idx_timer = -1;
+        unsigned int measured_us;
+
+        if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+                /*
+                 * One of the safety nets has triggered or this was a timer
+                 * wakeup (or equivalent).
+                 */
+                measured_us = sleep_length_us;
+        } else {
+                unsigned int lat = drv->states[cpu_data->last_state].exit_latency;
+
+                measured_us = ktime_to_us(cpu_data->time_span_ns);
+                /*
+                 * The delay between the wakeup and the first instruction
+                 * executed by the CPU is not likely to be worst-case every
+                 * time, so take 1/2 of the exit latency as a very rough
+                 * approximation of the average of it.
+                 */
+                if (measured_us >= lat)
+                        measured_us -= lat / 2;
+                else
+                        measured_us /= 2;
+        }
+
+        /*
+         * Decay the "early hits" metric for all of the states and find the
+         * states matching the sleep length and the measured idle duration.
+         */
+        for (i = 0; i < drv->state_count; i++) {
+                unsigned int early_hits = cpu_data->states[i].early_hits;
+
+                cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
+
+                if (drv->states[i].target_residency <= sleep_length_us) {
+                        idx_timer = i;
+                        if (drv->states[i].target_residency <= measured_us)
+                                idx_hit = i;
+                }
+        }
+
+        /*
+         * Update the "hits" and "misses" data for the state matching the
+         * sleep length.  If it matches the measured idle duration too, this
+         * is a hit, so increase the "hits" metric for it then.  Otherwise,
+         * this is a miss, so increase the "misses" metric for it.  In the
+         * latter case also increase the "early hits" metric for the state
+         * that actually matches the measured idle duration.
+         */
+        if (idx_timer >= 0) {
+                unsigned int hits = cpu_data->states[idx_timer].hits;
+                unsigned int misses = cpu_data->states[idx_timer].misses;
+
+                hits -= hits >> DECAY_SHIFT;
+                misses -= misses >> DECAY_SHIFT;
+
+                if (idx_timer > idx_hit) {
+                        misses += PULSE;
+                        if (idx_hit >= 0)
+                                cpu_data->states[idx_hit].early_hits += PULSE;
+                } else {
+                        hits += PULSE;
+                }
+
+                cpu_data->states[idx_timer].misses = misses;
+                cpu_data->states[idx_timer].hits = hits;
+        }
+
+        /*
+         * If the total time span between idle state selection and the
+         * "reflect" callback is greater than or equal to the sleep length
+         * determined at the idle state selection time, the wakeup is likely
+         * to be due to a timer event.
+         */
+        if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns)
+                measured_us = UINT_MAX;
+
+        /*
+         * Save idle duration values corresponding to non-timer wakeups for
+         * pattern detection.
+         */
+        cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+        if (cpu_data->interval_idx >= INTERVALS)
+                cpu_data->interval_idx = 0;
+}
+
+/**
+ * teo_find_shallower_state - Find shallower idle state matching given duration.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @state_idx: Index of the capping idle state.
+ * @duration_us: Idle duration value to match.
+ */
+static int teo_find_shallower_state(struct cpuidle_driver *drv,
+                                    struct cpuidle_device *dev, int state_idx,
+                                    unsigned int duration_us)
+{
+        int i;
+
+        for (i = state_idx - 1; i >= 0; i--) {
+                if (drv->states[i].disabled || dev->states_usage[i].disable)
+                        continue;
+
+                state_idx = i;
+                if (drv->states[i].target_residency <= duration_us)
+                        break;
+        }
+        return state_idx;
+}
+
+/**
+ * teo_select - Selects the next idle state to enter.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @stop_tick: Indication on whether or not to stop the scheduler tick.
+ */
+static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+                      bool *stop_tick)
+{
+        struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+        int latency_req = cpuidle_governor_latency_req(dev->cpu);
+        unsigned int duration_us, count;
+        int max_early_idx, idx, i;
+        ktime_t delta_tick;
+
+        if (cpu_data->last_state >= 0) {
+                teo_update(drv, dev);
+                cpu_data->last_state = -1;
+        }
+
+        cpu_data->time_span_ns = local_clock();
+
+        cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
+        duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+
+        count = 0;
+        max_early_idx = -1;
+        idx = -1;
+
+        for (i = 0; i < drv->state_count; i++) {
+                struct cpuidle_state *s = &drv->states[i];
+                struct cpuidle_state_usage *su = &dev->states_usage[i];
+
+                if (s->disabled || su->disable) {
+                        /*
+                         * If the "early hits" metric of a disabled state is
+                         * greater than the current maximum, it should be
+                         * taken into account, because it would be a mistake
+                         * to select a deeper state with lower "early hits"
+                         * metric.  The index cannot be changed to point to
+                         * it, however, so just increase the max count alone
+                         * and let the index still point to a shallower idle
+                         * state.
+                         */
+                        if (max_early_idx >= 0 &&
+                            count < cpu_data->states[i].early_hits)
+                                count = cpu_data->states[i].early_hits;
+
+                        continue;
+                }
+
+                if (idx < 0)
+                        idx = i; /* first enabled state */
+
+                if (s->target_residency > duration_us)
+                        break;
+
+                if (s->exit_latency > latency_req) {
+                        /*
+                         * If we break out of the loop for latency reasons,
+                         * use the target residency of the selected state as
+                         * the expected idle duration to avoid stopping the
+                         * tick as long as that target residency is low
+                         * enough.
+                         */
+                        duration_us = drv->states[idx].target_residency;
+                        goto refine;
+                }
+
+                idx = i;
+
+                if (count < cpu_data->states[i].early_hits &&
+                    !(tick_nohz_tick_stopped() &&
+                      drv->states[i].target_residency < TICK_USEC)) {
+                        count = cpu_data->states[i].early_hits;
+                        max_early_idx = i;
+                }
+        }
+
+        /*
+         * If the "hits" metric of the idle state matching the sleep length
+         * is greater than its "misses" metric, that is the one to use.
+         * Otherwise, it is more likely that one of the shallower states will
+         * match the idle duration observed after wakeup, so take the one with
+         * the maximum "early hits" metric, but if that cannot be determined,
+         * just use the state selected so far.
+         */
+        if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
+            max_early_idx >= 0) {
+                idx = max_early_idx;
+                duration_us = drv->states[idx].target_residency;
+        }
+
+refine:
+        if (idx < 0) {
+                idx = 0; /* No states enabled. Must use 0. */
+        } else if (idx > 0) {
+                u64 sum = 0;
+
+                count = 0;
+
+                /*
+                 * Count and sum the most recent idle duration values less
+                 * than the target residency of the state selected so far,
+                 * find the max.
+                 */
+                for (i = 0; i < INTERVALS; i++) {
+                        unsigned int val = cpu_data->intervals[i];
+
+                        if (val >= drv->states[idx].target_residency)
+                                continue;
+
+                        count++;
+                        sum += val;
+                }
+
+                /*
+                 * Give up unless the majority of the most recent idle
+                 * duration values are in the interesting range.
+                 */
+                if (count > INTERVALS / 2) {
+                        unsigned int avg_us = div64_u64(sum, count);
+
+                        /*
+                         * Avoid spending too much time in an idle state that
+                         * would be too shallow.
+                         */
+                        if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
+                                idx = teo_find_shallower_state(drv, dev, idx, avg_us);
+                                duration_us = avg_us;
+                        }
+                }
+        }
+
+        /*
+         * Don't stop the tick if the selected state is a polling one or if
+         * the expected idle duration is shorter than the tick period length.
+         */
+        if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+            duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
+                unsigned int delta_tick_us = ktime_to_us(delta_tick);
+
+                *stop_tick = false;
+
+                /*
+                 * The tick is not going to be stopped, so if the target
+                 * residency of the state to be returned is not within the
+                 * time till the closest timer including the tick, try to
+                 * correct that.
+                 */
+                if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
+                        idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+        }
+
+        return idx;
+}
+
+/**
+ * teo_reflect - Note that governor data for the CPU need to be updated.
+ * @dev: Target CPU.
+ * @state: Entered state.
+ */
+static void teo_reflect(struct cpuidle_device *dev, int state)
+{
+        struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+
+        cpu_data->last_state = state;
+
+        /*
+         * If the wakeup was not "natural", but triggered by one of the
+         * safety nets, assume that the CPU might have been idle for the
+         * entire sleep length time.
+         */
+        if (dev->poll_time_limit ||
+            (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+                dev->poll_time_limit = false;
+                cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+        } else {
+                cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+        }
+}
+
+/**
+ * teo_enable_device - Initialize the governor's data for the target CPU.
+ * @drv: cpuidle driver (not used).
+ * @dev: Target CPU.
+ */
+static int teo_enable_device(struct cpuidle_driver *drv,
+                             struct cpuidle_device *dev)
+{
+        struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+        int i;
+
+        memset(cpu_data, 0, sizeof(*cpu_data));
+
+        for (i = 0; i < INTERVALS; i++)
+                cpu_data->intervals[i] = UINT_MAX;
+
+        return 0;
+}
+
+static struct cpuidle_governor teo_governor = {
+        .name =                "teo",
+        .rating =        19,
+        .enable =        teo_enable_device,
+        .select =        teo_select,
+        .reflect =        teo_reflect,
+};
+
+static int __init teo_governor_init(void)
+{
+        return cpuidle_register_governor(&teo_governor);
+}
+
+postcore_initcall(teo_governor_init);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 017fc602a10e..cf7c66bb3ed9 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/pm_runtime.h>
 #include "i915_pmu.h"
 #include "intel_ringbuffer.h"
 #include "i915_drv.h"
@@ -478,7 +479,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
                  * counter value.
                  */
                 spin_lock_irqsave(&i915->pmu.lock, flags);
-                spin_lock(&kdev->power.lock);
 
                 /*
                  * After the above branch intel_runtime_pm_get_if_in_use failed
@@ -491,16 +491,13 @@ static u64 get_rc6(struct drm_i915_private *i915)
                  * suspended and if not we cannot do better than report the last
                  * known RC6 value.
                  */
-                if (kdev->power.runtime_status == RPM_SUSPENDED) {
-                        if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-                                i915->pmu.suspended_jiffies_last =
-                                                  kdev->power.suspended_jiffies;
+                if (pm_runtime_status_suspended(kdev)) {
+                        val = pm_runtime_suspended_time(kdev);
 
-                        val = kdev->power.suspended_jiffies -
-                              i915->pmu.suspended_jiffies_last;
-                        val += jiffies - kdev->power.accounting_timestamp;
+                        if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+                                i915->pmu.suspended_time_last = val;
 
-                        val = jiffies_to_nsecs(val);
+                        val -= i915->pmu.suspended_time_last;
                         val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 
                         i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
@@ -510,7 +507,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
                         val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
                 }
 
-                spin_unlock(&kdev->power.lock);
                 spin_unlock_irqrestore(&i915->pmu.lock, flags);
         }
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index b3728c5f13e7..4fc4f2478301 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -97,9 +97,9 @@ struct i915_pmu {
          */
         struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
         /**
-         * @suspended_jiffies_last: Cached suspend time from PM core.
+         * @suspended_time_last: Cached suspend time from PM core.
          */
-        unsigned long suspended_jiffies_last;
+        u64 suspended_time_last;
         /**
          * @i915_attr: Memory block holding device attributes.
          */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8b5d85c91e9d..b8647b5c3d4d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1103,6 +1103,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
         INTEL_CPU_FAM6(ATOM_GOLDMONT,           idle_cpu_bxt),
         INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS,      idle_cpu_bxt),
         INTEL_CPU_FAM6(ATOM_GOLDMONT_X,         idle_cpu_dnv),
+        INTEL_CPU_FAM6(ATOM_TREMONT_X,          idle_cpu_dnv),
         {}
 };
 
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 6cdb2c14eee4..4347f15165f8 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1156,6 +1156,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
         INTEL_CPU_FAM6(KABYLAKE_MOBILE,         rapl_defaults_core),
         INTEL_CPU_FAM6(KABYLAKE_DESKTOP,        rapl_defaults_core),
         INTEL_CPU_FAM6(CANNONLAKE_MOBILE,       rapl_defaults_core),
+        INTEL_CPU_FAM6(ICELAKE_MOBILE,          rapl_defaults_core),
 
         INTEL_CPU_FAM6(ATOM_SILVERMONT,         rapl_defaults_byt),
         INTEL_CPU_FAM6(ATOM_AIRMONT,            rapl_defaults_cht),
@@ -1164,6 +1165,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
         INTEL_CPU_FAM6(ATOM_GOLDMONT,           rapl_defaults_core),
         INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS,      rapl_defaults_core),
         INTEL_CPU_FAM6(ATOM_GOLDMONT_X,         rapl_defaults_core),
+        INTEL_CPU_FAM6(ATOM_TREMONT_X,          rapl_defaults_core),
 
         INTEL_CPU_FAM6(XEON_PHI_KNL,            rapl_defaults_hsw_server),
         INTEL_CPU_FAM6(XEON_PHI_KNM,            rapl_defaults_hsw_server),
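For reference, the hits/misses/early_hits counters in teo.c above are decaying sums: each update first subtracts 1/2^DECAY_SHIFT (one eighth) of the counter, and a matching event then adds PULSE, so a counter saturates near PULSE << DECAY_SHIFT = 8192 when every wakeup matches. A self-contained sketch of that arithmetic; decay() and the main() driver are illustrative only, not kernel code:

/* Sketch: the PULSE/DECAY_SHIFT bookkeeping used by the TEO governor,
 * reduced to a standalone illustration.  Counters decay by one eighth
 * on every update and saturate near PULSE * 2^DECAY_SHIFT = 8192 when
 * an event "matches" on every wakeup.  Names are illustrative only.
 */
#include <stdio.h>

#define PULSE           1024
#define DECAY_SHIFT     3

static unsigned int decay(unsigned int metric)
{
        return metric - (metric >> DECAY_SHIFT);
}

int main(void)
{
        unsigned int hits = 0;
        int i;

        /* A run of "matching" wakeups drives the metric toward ~8192. */
        for (i = 0; i < 64; i++)
                hits = decay(hits) + PULSE;
        printf("after 64 hits: %u\n", hits);

        /* Without further hits it decays geometrically toward zero. */
        for (i = 0; i < 16; i++)
                hits = decay(hits);
        printf("after 16 idle updates: %u\n", hits);

        return 0;
}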