Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c                     5
-rw-r--r--  drivers/base/bus.c                               6
-rw-r--r--  drivers/base/cacheinfo.c                         3
-rw-r--r--  drivers/base/core.c                             89
-rw-r--r--  drivers/base/dd.c                               55
-rw-r--r--  drivers/base/devcon.c                            2
-rw-r--r--  drivers/base/devtmpfs.c                          3
-rw-r--r--  drivers/base/driver.c                            4
-rw-r--r--  drivers/base/firmware_loader/Kconfig            18
-rw-r--r--  drivers/base/firmware_loader/fallback.c         65
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c   13
-rw-r--r--  drivers/base/firmware_loader/firmware.h         16
-rw-r--r--  drivers/base/firmware_loader/main.c            224
-rw-r--r--  drivers/base/memory.c                          219
-rw-r--r--  drivers/base/node.c                             40
-rw-r--r--  drivers/base/platform.c                         11
-rw-r--r--  drivers/base/power/domain.c                      8
-rw-r--r--  drivers/base/power/domain_governor.c             4
-rw-r--r--  drivers/base/power/qos.c                       135
-rw-r--r--  drivers/base/power/runtime.c                     2
-rw-r--r--  drivers/base/regmap/Kconfig                      2
21 files changed, 679 insertions(+), 245 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 9b09e31ae82f..63c1e76739f1 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -137,7 +137,6 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
sizeof(*raw_capacity),
GFP_KERNEL);
if (!raw_capacity) {
- pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
cap_parsing_failed = true;
return false;
}
@@ -217,10 +216,8 @@ static int __init register_cpufreq_notifier(void)
if (!acpi_disabled || !raw_capacity)
return -EINVAL;
- if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
- pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
+ if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
return -ENOMEM;
- }
cpumask_copy(cpus_to_visit, cpu_possible_mask);
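The two hunks above drop the error messages on allocation failure: the kernel's allocators already dump a warning (with a backtrace) when an allocation without __GFP_NOWARN fails, so an extra pr_err() is pure duplication. A minimal sketch of the resulting pattern, with a made-up function name:

#include <linux/slab.h>

static int alloc_example(void)
{
	u32 *buf = kcalloc(16, sizeof(*buf), GFP_KERNEL);

	/* No pr_err() here: the allocator already warned on failure. */
	if (!buf)
		return -ENOMEM;

	kfree(buf);
	return 0;
}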
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 0a58e969f8b7..df3cac739813 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -323,8 +323,8 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev);
* return to the caller and not iterate over any more devices.
*/
struct device *bus_find_device(struct bus_type *bus,
- struct device *start, void *data,
- int (*match)(struct device *dev, void *data))
+ struct device *start, const void *data,
+ int (*match)(struct device *dev, const void *data))
{
struct klist_iter i;
struct device *dev;
@@ -342,7 +342,7 @@ struct device *bus_find_device(struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device);
-static int match_name(struct device *dev, void *data)
+static int match_name(struct device *dev, const void *data)
{
const char *name = data;
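This constifies the match-callback data for bus_find_device(), so match functions advertise that they only read it. A hedged usage sketch against the new signature; match_by_name() and find_by_name() are made-up names mirroring the match_name() helper above:

#include <linux/device.h>
#include <linux/string.h>

static int match_by_name(struct device *dev, const void *data)
{
	const char *name = data;

	return sysfs_streq(dev_name(dev), name);
}

static struct device *find_by_name(struct bus_type *bus, const char *name)
{
	/* The returned device, if any, comes with a reference held. */
	return bus_find_device(bus, NULL, name, match_by_name);
}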
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 8827c60f51e2..8d553c92cd32 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -660,7 +660,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
static int __init cacheinfo_sysfs_init(void)
{
- return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+ return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
+ "base/cacheinfo:online",
cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b4c64528f13c..1669d41fcddc 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1663,6 +1663,9 @@ void device_initialize(struct device *dev)
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
+#ifdef CONFIG_PROVE_LOCKING
+ mutex_init(&dev->lockdep_mutex);
+#endif
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
@@ -1820,12 +1823,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
*/
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
+ unsigned int ref;
+
/* see if we live in a "glue" directory */
if (!live_in_glue_dir(glue_dir, dev))
return;
mutex_lock(&gdp_mutex);
- if (!kobject_has_children(glue_dir))
+ /**
+ * There is a race condition between removing glue directory
+ * and adding a new device under the glue directory.
+ *
+ * CPU1: CPU2:
+ *
+ * device_add()
+ * get_device_parent()
+ * class_dir_create_and_add()
+ * kobject_add_internal()
+ * create_dir() // create glue_dir
+ *
+ * device_add()
+ * get_device_parent()
+ * kobject_get() // get glue_dir
+ *
+ * device_del()
+ * cleanup_glue_dir()
+ * kobject_del(glue_dir)
+ *
+ * kobject_add()
+ * kobject_add_internal()
+ * create_dir() // in glue_dir
+ * sysfs_create_dir_ns()
+ * kernfs_create_dir_ns(sd)
+ *
+ * sysfs_remove_dir() // glue_dir->sd=NULL
+ * sysfs_put() // free glue_dir->sd
+ *
+ * // sd is freed
+ * kernfs_new_node(sd)
+ * kernfs_get(glue_dir)
+ * kernfs_add_one()
+ * kernfs_put()
+ *
+ * Before CPU1 removes the last child device under the glue dir, if
+ * CPU2 adds a new device under the glue dir, the glue_dir kobject
+ * reference count is increased to 2 in kobject_get(k). CPU2 has also
+ * already called kernfs_create_dir_ns(). Meanwhile, CPU1 calls
+ * sysfs_remove_dir() and sysfs_put(), so glue_dir->sd ends up freed.
+ *
+ * CPU2 then sees a stale "empty" but still potentially used glue dir
+ * around in kernfs_new_node().
+ *
+ * To avoid this, we must make sure the kernfs_node for glue_dir is
+ * released on CPU1 only when the refcount of the glue_dir kobject
+ * is 1.
+ */
+ ref = kref_read(&glue_dir->kref);
+ if (!kobject_has_children(glue_dir) && !--ref)
kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
@@ -2211,6 +2265,24 @@ void put_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(put_device);
+bool kill_device(struct device *dev)
+{
+ /*
+ * Require the device lock and set the "dead" flag to guarantee that
+ * the update behavior is consistent with the other bitfields near
+ * it and that we cannot have an asynchronous probe routine trying
+ * to run while we are tearing out the bus/class/sysfs from
+ * underneath the device.
+ */
+ lockdep_assert_held(&dev->mutex);
+
+ if (dev->p->dead)
+ return false;
+ dev->p->dead = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
/**
* device_del - delete device from system.
* @dev: device.
@@ -2230,15 +2302,8 @@ void device_del(struct device *dev)
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
- /*
- * Hold the device lock and set the "dead" flag to guarantee that
- * the update behavior is consistent with the other bitfields near
- * it and that we cannot have an asynchronous probe routine trying
- * to run while we are tearing out the bus/class/sysfs from
- * underneath the device.
- */
device_lock(dev);
- dev->p->dead = true;
+ kill_device(dev);
device_unlock(dev);
/* Notify clients of device removal. This call must come
@@ -3356,3 +3421,9 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+
+int device_match_of_node(struct device *dev, const void *np)
+{
+ return dev->of_node == np;
+}
+EXPORT_SYMBOL_GPL(device_match_of_node);
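Besides the glue-dir race fix and the new kill_device() helper, the core.c hunks add device_match_of_node(), a generic match callback whose signature fits the constified bus_find_device()/driver_find_device() above. A sketch of how a subsystem might use it (find_dev_for_node() is a made-up name; the caller owns the reference on the result and must put_device() it):

#include <linux/device.h>
#include <linux/of.h>

static struct device *find_dev_for_node(struct bus_type *bus,
					struct device_node *np)
{
	return bus_find_device(bus, NULL, np, device_match_of_node);
}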
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 0df9b4461766..994a90747420 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -235,6 +235,19 @@ static int __init deferred_probe_timeout_setup(char *str)
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+static int __driver_deferred_probe_check_state(struct device *dev)
+{
+ if (!initcalls_done)
+ return -EPROBE_DEFER;
+
+ if (!deferred_probe_timeout) {
+ dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
/**
* driver_deferred_probe_check_state() - Check deferred probe state
* @dev: device to check
@@ -248,14 +261,40 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
*/
int driver_deferred_probe_check_state(struct device *dev)
{
- if (initcalls_done) {
- if (!deferred_probe_timeout) {
- dev_WARN(dev, "deferred probe timeout, ignoring dependency");
- return -ETIMEDOUT;
- }
- dev_warn(dev, "ignoring dependency for device, assuming no driver");
- return -ENODEV;
- }
+ int ret;
+
+ ret = __driver_deferred_probe_check_state(dev);
+ if (ret < 0)
+ return ret;
+
+ dev_warn(dev, "ignoring dependency for device, assuming no driver");
+
+ return -ENODEV;
+}
+
+/**
+ * driver_deferred_probe_check_state_continue() - check deferred probe state
+ * @dev: device to check
+ *
+ * Returns -ETIMEDOUT if deferred probe debug timeout has expired, or
+ * -EPROBE_DEFER otherwise.
+ *
+ * Drivers or subsystems can opt-in to calling this function instead of
+ * directly returning -EPROBE_DEFER.
+ *
+ * This is similar to driver_deferred_probe_check_state(), but it allows the
+ * subsystem to keep deferring probe after built-in drivers have had a chance
+ * to probe. One scenario where that is useful is if built-in drivers rely on
+ * resources that are provided by modular drivers.
+ */
+int driver_deferred_probe_check_state_continue(struct device *dev)
+{
+ int ret;
+
+ ret = __driver_deferred_probe_check_state(dev);
+ if (ret < 0)
+ return ret;
+
return -EPROBE_DEFER;
}
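driver_deferred_probe_check_state_continue() keeps returning -EPROBE_DEFER after initcalls are done (until the debug timeout fires), unlike the plain _check_state() variant, which gives up with -ENODEV. A hedged sketch of an intended call site, with made-up names and a boolean standing in for a real provider lookup:

#include <linux/device.h>

static bool provider_ready;	/* stands in for a real provider lookup */

static int my_resource_get(struct device *dev)
{
	if (!provider_ready)
		return driver_deferred_probe_check_state_continue(dev);

	return 0;
}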
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
index f7035fc12b92..09f28479b243 100644
--- a/drivers/base/devcon.c
+++ b/drivers/base/devcon.c
@@ -133,7 +133,7 @@ static struct bus_type *generic_match_buses[] = {
NULL,
};
-static int device_fwnode_match(struct device *dev, void *fwnode)
+static int device_fwnode_match(struct device *dev, const void *fwnode)
{
return dev_fwnode(dev) == fwnode;
}
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 0dbc43068eeb..ba5c80903efe 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -357,8 +357,7 @@ int devtmpfs_mount(const char *mntdir)
if (!thread)
return 0;
- err = ksys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT,
- NULL);
+ err = ksys_mount("devtmpfs", mntdir, "devtmpfs", MS_SILENT, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 857c8f1b876e..4e5ca632f35e 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -73,8 +73,8 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
* return to the caller and not iterate over any more devices.
*/
struct device *driver_find_device(struct device_driver *drv,
- struct device *start, void *data,
- int (*match)(struct device *dev, void *data))
+ struct device *start, const void *data,
+ int (*match)(struct device *dev, const void *data))
{
struct klist_iter i;
struct device *dev;
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 38f2da6f5c2b..3f9e274e2ed3 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -26,6 +26,9 @@ config FW_LOADER
if FW_LOADER
+config FW_LOADER_PAGED_BUF
+ bool
+
config EXTRA_FIRMWARE
string "Build named firmware blobs into the kernel binary"
help
@@ -67,6 +70,7 @@ config EXTRA_FIRMWARE_DIR
config FW_LOADER_USER_HELPER
bool "Enable the firmware sysfs fallback mechanism"
+ select FW_LOADER_PAGED_BUF
help
This option enables a sysfs loading facility to enable firmware
loading to the kernel through userspace as a fallback mechanism
@@ -151,5 +155,19 @@ config FW_LOADER_USER_HELPER_FALLBACK
If you are unsure about this, say N here.
+config FW_LOADER_COMPRESS
+ bool "Enable compressed firmware support"
+ select FW_LOADER_PAGED_BUF
+ select XZ_DEC
+ help
+ This option enables the support for loading compressed firmware
+ files. The caller of firmware API receives the decompressed file
+ content. The compressed file is loaded as a fallback, only after
+ loading the raw file failed at first.
+
+ Currently only XZ-compressed files are supported, and they have to
+ be compressed with either none or crc32 integrity check type (pass
+ "-C crc32" option to xz command).
+
endif # FW_LOADER
endmenu
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index f962488546b6..62ee90b4db56 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -219,20 +219,6 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
-/* one pages buffer should be mapped/unmapped only once */
-static int map_fw_priv_pages(struct fw_priv *fw_priv)
-{
- if (!fw_priv->is_paged_buf)
- return 0;
-
- vunmap(fw_priv->data);
- fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
- PAGE_KERNEL_RO);
- if (!fw_priv->data)
- return -ENOMEM;
- return 0;
-}
-
/**
* firmware_loading_store() - set value in the 'loading' control file
* @dev: device pointer
@@ -254,7 +240,6 @@ static ssize_t firmware_loading_store(struct device *dev,
struct fw_priv *fw_priv;
ssize_t written = count;
int loading = simple_strtol(buf, NULL, 10);
- int i;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
@@ -265,12 +250,7 @@ static ssize_t firmware_loading_store(struct device *dev,
case 1:
/* discarding any previous partial load */
if (!fw_sysfs_done(fw_priv)) {
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- vfree(fw_priv->pages);
- fw_priv->pages = NULL;
- fw_priv->page_array_size = 0;
- fw_priv->nr_pages = 0;
+ fw_free_paged_buf(fw_priv);
fw_state_start(fw_priv);
}
break;
@@ -284,7 +264,7 @@ static ssize_t firmware_loading_store(struct device *dev,
* see the mapped 'buf->data' once the loading
* is completed.
* */
- rc = map_fw_priv_pages(fw_priv);
+ rc = fw_map_paged_buf(fw_priv);
if (rc)
dev_err(dev, "%s: map pages failed\n",
__func__);
@@ -389,40 +369,13 @@ out:
static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
- struct fw_priv *fw_priv = fw_sysfs->fw_priv;
- int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
-
- /* If the array of pages is too small, grow it... */
- if (fw_priv->page_array_size < pages_needed) {
- int new_array_size = max(pages_needed,
- fw_priv->page_array_size * 2);
- struct page **new_pages;
+ int err;
- new_pages = vmalloc(array_size(new_array_size, sizeof(void *)));
- if (!new_pages) {
- fw_load_abort(fw_sysfs);
- return -ENOMEM;
- }
- memcpy(new_pages, fw_priv->pages,
- fw_priv->page_array_size * sizeof(void *));
- memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
- (new_array_size - fw_priv->page_array_size));
- vfree(fw_priv->pages);
- fw_priv->pages = new_pages;
- fw_priv->page_array_size = new_array_size;
- }
-
- while (fw_priv->nr_pages < pages_needed) {
- fw_priv->pages[fw_priv->nr_pages] =
- alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
-
- if (!fw_priv->pages[fw_priv->nr_pages]) {
- fw_load_abort(fw_sysfs);
- return -ENOMEM;
- }
- fw_priv->nr_pages++;
- }
- return 0;
+ err = fw_grow_paged_buf(fw_sysfs->fw_priv,
+ PAGE_ALIGN(min_size) >> PAGE_SHIFT);
+ if (err)
+ fw_load_abort(fw_sysfs);
+ return err;
}
/**
@@ -659,7 +612,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
/* Also permit LSMs and IMA to fail firmware sysfs fallback */
ret = security_kernel_load_data(LOADING_FIRMWARE);
if (ret < 0)
- return ret;
+ return false;
return fw_force_sysfs_fallback(opt_flags);
}
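The last fallback.c hunk fixes a subtle type bug: fw_run_sysfs_fallback() returns bool, so "return ret;" with a negative errno from security_kernel_load_data() was implicitly converted to true, i.e. an LSM denial enabled the fallback instead of blocking it. A tiny illustration of the pitfall, with made-up names:

#include <linux/types.h>

static bool load_allowed_buggy(int err)
{
	return err;		/* -EPERM converts to true ("allowed")! */
}

static bool load_allowed_fixed(int err)
{
	if (err < 0)
		return false;	/* any error means "not allowed" */
	return true;
}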
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 776dd69cf5be..ba9d30b28edc 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -16,9 +16,6 @@
* firmware fallback configuration table
*/
-static unsigned int zero;
-static unsigned int one = 1;
-
struct firmware_fallback_config fw_fallback_config = {
.force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
.loading_timeout = 60,
@@ -26,6 +23,7 @@ struct firmware_fallback_config fw_fallback_config = {
};
EXPORT_SYMBOL_GPL(fw_fallback_config);
+#ifdef CONFIG_SYSCTL
struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
@@ -33,8 +31,8 @@ struct ctl_table firmware_config_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{
.procname = "ignore_sysfs_fallback",
@@ -42,9 +40,10 @@ struct ctl_table firmware_config_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
EXPORT_SYMBOL_GPL(firmware_config_table);
+#endif
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 4c1395f8e7ed..7ecd590e67fe 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -64,12 +64,14 @@ struct fw_priv {
void *data;
size_t size;
size_t allocated_size;
-#ifdef CONFIG_FW_LOADER_USER_HELPER
+#ifdef CONFIG_FW_LOADER_PAGED_BUF
bool is_paged_buf;
- bool need_uevent;
struct page **pages;
int nr_pages;
int page_array_size;
+#endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ bool need_uevent;
struct list_head pending_list;
#endif
const char *fw_name;
@@ -133,4 +135,14 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
int assign_fw(struct firmware *fw, struct device *device,
enum fw_opt opt_flags);
+#ifdef CONFIG_FW_LOADER_PAGED_BUF
+void fw_free_paged_buf(struct fw_priv *fw_priv);
+int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
+int fw_map_paged_buf(struct fw_priv *fw_priv);
+#else
+static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
+static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
+static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+#endif
+
#endif /* __FIRMWARE_LOADER_H */
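The #else branch above supplies static inline stubs so that callers such as __free_fw_priv() can use the paged-buffer API unconditionally, with no #ifdef at the call site; the compiler then discards the dead branch. The same idiom in isolation, with made-up names:

#include <linux/errno.h>

struct my_ctx;

#ifdef CONFIG_MY_FEATURE
int my_feature_setup(struct my_ctx *ctx);
#else
static inline int my_feature_setup(struct my_ctx *ctx) { return -ENXIO; }
#endif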
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 7eaaf5ee5ba6..bf44c79beae9 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -33,6 +33,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/xz.h>
#include <generated/utsrelease.h>
@@ -251,15 +252,7 @@ static void __free_fw_priv(struct kref *ref)
list_del(&fw_priv->list);
spin_unlock(&fwc->lock);
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- if (fw_priv->is_paged_buf) {
- int i;
- vunmap(fw_priv->data);
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- vfree(fw_priv->pages);
- } else
-#endif
+ fw_free_paged_buf(fw_priv); /* free leftover pages */
if (!fw_priv->allocated_size)
vfree(fw_priv->data);
kfree_const(fw_priv->fw_name);
@@ -274,6 +267,174 @@ static void free_fw_priv(struct fw_priv *fw_priv)
spin_unlock(&fwc->lock);
}
+#ifdef CONFIG_FW_LOADER_PAGED_BUF
+void fw_free_paged_buf(struct fw_priv *fw_priv)
+{
+ int i;
+
+ if (!fw_priv->pages)
+ return;
+
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kvfree(fw_priv->pages);
+ fw_priv->pages = NULL;
+ fw_priv->page_array_size = 0;
+ fw_priv->nr_pages = 0;
+}
+
+int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
+{
+ /* If the array of pages is too small, grow it */
+ if (fw_priv->page_array_size < pages_needed) {
+ int new_array_size = max(pages_needed,
+ fw_priv->page_array_size * 2);
+ struct page **new_pages;
+
+ new_pages = kvmalloc_array(new_array_size, sizeof(void *),
+ GFP_KERNEL);
+ if (!new_pages)
+ return -ENOMEM;
+ memcpy(new_pages, fw_priv->pages,
+ fw_priv->page_array_size * sizeof(void *));
+ memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
+ (new_array_size - fw_priv->page_array_size));
+ kvfree(fw_priv->pages);
+ fw_priv->pages = new_pages;
+ fw_priv->page_array_size = new_array_size;
+ }
+
+ while (fw_priv->nr_pages < pages_needed) {
+ fw_priv->pages[fw_priv->nr_pages] =
+ alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+
+ if (!fw_priv->pages[fw_priv->nr_pages])
+ return -ENOMEM;
+ fw_priv->nr_pages++;
+ }
+
+ return 0;
+}
+
+int fw_map_paged_buf(struct fw_priv *fw_priv)
+{
+ /* a paged buffer should be mapped/unmapped only once */
+ if (!fw_priv->pages)
+ return 0;
+
+ vunmap(fw_priv->data);
+ fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
+ PAGE_KERNEL_RO);
+ if (!fw_priv->data)
+ return -ENOMEM;
+
+ /* page table is no longer needed after mapping, let's free */
+ kvfree(fw_priv->pages);
+ fw_priv->pages = NULL;
+
+ return 0;
+}
+#endif
+
+/*
+ * XZ-compressed firmware support
+ */
+#ifdef CONFIG_FW_LOADER_COMPRESS
+/* show an error and return the standard error code */
+static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
+{
+ if (xz_ret != XZ_STREAM_END) {
+ dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
+ return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
+ }
+ return 0;
+}
+
+/* single-shot decompression onto the pre-allocated buffer */
+static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ struct xz_dec *xz_dec;
+ struct xz_buf xz_buf;
+ enum xz_ret xz_ret;
+
+ xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
+ if (!xz_dec)
+ return -ENOMEM;
+
+ xz_buf.in_size = in_size;
+ xz_buf.in = in_buffer;
+ xz_buf.in_pos = 0;
+ xz_buf.out_size = fw_priv->allocated_size;
+ xz_buf.out = fw_priv->data;
+ xz_buf.out_pos = 0;
+
+ xz_ret = xz_dec_run(xz_dec, &xz_buf);
+ xz_dec_end(xz_dec);
+
+ fw_priv->size = xz_buf.out_pos;
+ return fw_decompress_xz_error(dev, xz_ret);
+}
+
+/* decompress onto the paged buffer and map it */
+static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ struct xz_dec *xz_dec;
+ struct xz_buf xz_buf;
+ enum xz_ret xz_ret;
+ struct page *page;
+ int err = 0;
+
+ xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
+ if (!xz_dec)
+ return -ENOMEM;
+
+ xz_buf.in_size = in_size;
+ xz_buf.in = in_buffer;
+ xz_buf.in_pos = 0;
+
+ fw_priv->is_paged_buf = true;
+ fw_priv->size = 0;
+ do {
+ if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* decompress onto the newly allocated page */
+ page = fw_priv->pages[fw_priv->nr_pages - 1];
+ xz_buf.out = kmap(page);
+ xz_buf.out_pos = 0;
+ xz_buf.out_size = PAGE_SIZE;
+ xz_ret = xz_dec_run(xz_dec, &xz_buf);
+ kunmap(page);
+ fw_priv->size += xz_buf.out_pos;
+ /* partial decompression means either end or error */
+ if (xz_buf.out_pos != PAGE_SIZE)
+ break;
+ } while (xz_ret == XZ_OK);
+
+ err = fw_decompress_xz_error(dev, xz_ret);
+ if (!err)
+ err = fw_map_paged_buf(fw_priv);
+
+ out:
+ xz_dec_end(xz_dec);
+ return err;
+}
+
+static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ /* if the buffer is pre-allocated, we can perform in single-shot mode */
+ if (fw_priv->data)
+ return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
+ else
+ return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
+}
+#endif /* CONFIG_FW_LOADER_COMPRESS */
+
/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
@@ -293,7 +454,12 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
static int
-fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
+fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
+ const char *suffix,
+ int (*decompress)(struct device *dev,
+ struct fw_priv *fw_priv,
+ size_t in_size,
+ const void *in_buffer))
{
loff_t size;
int i, len;
@@ -301,9 +467,11 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
char *path;
enum kernel_read_file_id id = READING_FIRMWARE;
size_t msize = INT_MAX;
+ void *buffer = NULL;
/* Already populated data member means we're loading into a buffer */
- if (fw_priv->data) {
+ if (!decompress && fw_priv->data) {
+ buffer = fw_priv->data;
id = READING_FIRMWARE_PREALLOC_BUFFER;
msize = fw_priv->allocated_size;
}
@@ -317,15 +485,15 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
if (!fw_path[i][0])
continue;
- len = snprintf(path, PATH_MAX, "%s/%s",
- fw_path[i], fw_priv->fw_name);
+ len = snprintf(path, PATH_MAX, "%s/%s%s",
+ fw_path[i], fw_priv->fw_name, suffix);
if (len >= PATH_MAX) {
rc = -ENAMETOOLONG;
break;
}
fw_priv->size = 0;
- rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
+ rc = kernel_read_file_from_path(path, &buffer, &size,
msize, id);
if (rc) {
if (rc != -ENOENT)
@@ -336,8 +504,24 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
path);
continue;
}
- dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name);
- fw_priv->size = size;
+ if (decompress) {
+ dev_dbg(device, "f/w decompressing %s\n",
+ fw_priv->fw_name);
+ rc = decompress(device, fw_priv, size, buffer);
+ /* discard the superfluous original content */
+ vfree(buffer);
+ buffer = NULL;
+ if (rc) {
+ fw_free_paged_buf(fw_priv);
+ continue;
+ }
+ } else {
+ dev_dbg(device, "direct-loading %s\n",
+ fw_priv->fw_name);
+ if (!fw_priv->data)
+ fw_priv->data = buffer;
+ fw_priv->size = size;
+ }
fw_state_done(fw_priv);
break;
}
@@ -584,7 +768,13 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
- ret = fw_get_filesystem_firmware(device, fw->priv);
+ ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
+#ifdef CONFIG_FW_LOADER_COMPRESS
+ if (ret == -ENOENT)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
+ fw_decompress_xz);
+#endif
+
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
dev_warn(device,
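From a driver's point of view the compressed-firmware support is transparent: _request_firmware() first tries the raw file and only retries with the ".xz" suffix and the decompressor on -ENOENT, so callers keep using the usual API. A hedged sketch, assuming CONFIG_FW_LOADER_COMPRESS=y ("vendor/blob.bin" and load_blob() are made-up names):

#include <linux/firmware.h>

static int load_blob(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Finds vendor/blob.bin, or vendor/blob.bin.xz decompressed. */
	err = request_firmware(&fw, "vendor/blob.bin", dev);
	if (err)
		return err;

	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
	return 0;
}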
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f180427e48f4..20c39d1bcef8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -34,11 +34,21 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
static int sections_per_block;
-static inline int base_memory_block_id(int section_nr)
+static inline unsigned long base_memory_block_id(unsigned long section_nr)
{
return section_nr / sections_per_block;
}
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return base_memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -126,9 +136,9 @@ static ssize_t phys_index_show(struct device *dev,
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- unsigned long i, pfn;
- int ret = 1;
struct memory_block *mem = to_memory_block(dev);
+ unsigned long pfn;
+ int ret = 1, i;
if (mem->state != MEM_ONLINE)
goto out;
@@ -578,23 +588,13 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
-/*
- * A reference for the returned object is held and the reference for the
- * hinted object is released.
- */
-struct memory_block *find_memory_block_hinted(struct mem_section *section,
- struct memory_block *hint)
+/* A reference for the returned memory block device is acquired. */
+static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
- int block_id = base_memory_block_id(__section_nr(section));
- struct device *hintdev = hint ? &hint->dev : NULL;
struct device *dev;
- dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
- if (hint)
- put_device(&hint->dev);
- if (!dev)
- return NULL;
- return to_memory_block(dev);
+ dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
+ return dev ? to_memory_block(dev) : NULL;
}
/*
@@ -607,7 +607,9 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
- return find_memory_block_hinted(section, NULL);
+ unsigned long block_id = base_memory_block_id(__section_nr(section));
+
+ return find_memory_block_by_id(block_id);
}
static struct attribute *memory_memblk_attrs[] = {
@@ -652,20 +654,22 @@ int register_memory(struct memory_block *memory)
}
static int init_memory_block(struct memory_block **memory,
- struct mem_section *section, unsigned long state)
+ unsigned long block_id, unsigned long state)
{
struct memory_block *mem;
unsigned long start_pfn;
- int scn_nr;
int ret = 0;
+ mem = find_memory_block_by_id(block_id);
+ if (mem) {
+ put_device(&mem->dev);
+ return -EEXIST;
+ }
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
- scn_nr = __section_nr(section);
- mem->start_section_nr =
- base_memory_block_id(scn_nr) * sections_per_block;
+ mem->start_section_nr = block_id * sections_per_block;
mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
mem->state = state;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -677,97 +681,101 @@ static int init_memory_block(struct memory_block **memory,
return ret;
}
-static int add_memory_block(int base_section_nr)
+static int add_memory_block(unsigned long base_section_nr)
{
+ int ret, section_count = 0;
struct memory_block *mem;
- int i, ret, section_count = 0, section_nr;
+ unsigned long nr;
- for (i = base_section_nr;
- i < base_section_nr + sections_per_block;
- i++) {
- if (!present_section_nr(i))
- continue;
- if (section_count == 0)
- section_nr = i;
- section_count++;
- }
+ for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
+ nr++)
+ if (present_section_nr(nr))
+ section_count++;
if (section_count == 0)
return 0;
- ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+ ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
+ MEM_ONLINE);
if (ret)
return ret;
mem->section_count = section_count;
return 0;
}
+static void unregister_memory(struct memory_block *memory)
+{
+ if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
+ return;
+
+ /* drop the ref. we got via find_memory_block() */
+ put_device(&memory->dev);
+ device_unregister(&memory->dev);
+}
+
/*
- * need an interface for the VM to add new memory regions,
- * but without onlining it.
+ * Create memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * will be initialized as offline.
*/
-int hotplug_memory_register(int nid, struct mem_section *section)
+int create_memory_block_devices(unsigned long start, unsigned long size)
{
- int ret = 0;
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
- mutex_lock(&mem_sysfs_mutex);
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
+ return -EINVAL;
- mem = find_memory_block(section);
- if (mem) {
- mem->section_count++;
- put_device(&mem->dev);
- } else {
- ret = init_memory_block(&mem, section, MEM_OFFLINE);
+ mutex_lock(&mem_sysfs_mutex);
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
- goto out;
- mem->section_count++;
+ break;
+ mem->section_count = sections_per_block;
+ }
+ if (ret) {
+ end_block_id = block_id;
+ for (block_id = start_block_id; block_id != end_block_id;
+ block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ mem->section_count = 0;
+ unregister_memory(mem);
+ }
}
-
-out:
mutex_unlock(&mem_sysfs_mutex);
return ret;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
-static void
-unregister_memory(struct memory_block *memory)
-{
- BUG_ON(memory->dev.bus != &memory_subsys);
-
- /* drop the ref. we got via find_memory_block() */
- put_device(&memory->dev);
- device_unregister(&memory->dev);
-}
-
-void unregister_memory_section(struct mem_section *section)
+/*
+ * Remove memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * have to be offline.
+ */
+void remove_memory_block_devices(unsigned long start, unsigned long size)
{
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
struct memory_block *mem;
+ unsigned long block_id;
- if (WARN_ON_ONCE(!present_section(section)))
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
return;
mutex_lock(&mem_sysfs_mutex);
-
- /*
- * Some users of the memory hotplug do not want/need memblock to
- * track all sections. Skip over those.
- */
- mem = find_memory_block(section);
- if (!mem)
- goto out_unlock;
-
- unregister_mem_sect_under_nodes(mem, __section_nr(section));
-
- mem->section_count--;
- if (mem->section_count == 0)
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (WARN_ON_ONCE(!mem))
+ continue;
+ mem->section_count = 0;
+ unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
- else
- put_device(&mem->dev);
-
-out_unlock:
+ }
mutex_unlock(&mem_sysfs_mutex);
}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
@@ -804,10 +812,9 @@ static const struct attribute_group *memory_root_attr_groups[] = {
*/
int __init memory_dev_init(void)
{
- unsigned int i;
int ret;
int err;
- unsigned long block_sz;
+ unsigned long block_sz, nr;
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
@@ -821,9 +828,9 @@ int __init memory_dev_init(void)
* during boot and have been initialized
*/
mutex_lock(&mem_sysfs_mutex);
- for (i = 0; i <= __highest_present_section_nr;
- i += sections_per_block) {
- err = add_memory_block(i);
+ for (nr = 0; nr <= __highest_present_section_nr;
+ nr += sections_per_block) {
+ err = add_memory_block(nr);
if (!ret)
ret = err;
}
@@ -834,3 +841,43 @@ out:
printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
return ret;
}
+
+/**
+ * walk_memory_blocks - walk through all present memory blocks overlapped
+ * by the range [start, start + size)
+ *
+ * @start: start address of the memory range
+ * @size: size of the memory range
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks overlapped by the
+ * range [start, start + size), calling func on each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func)
+{
+ const unsigned long start_block_id = phys_to_block_id(start);
+ const unsigned long end_block_id = phys_to_block_id(start + size - 1);
+ struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
+
+ if (!size)
+ return 0;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ ret = func(mem, arg);
+ put_device(&mem->dev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
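walk_memory_blocks() resolves the physical range to block ids and invokes func on each present block while holding a device reference. A hedged usage sketch, with made-up names, counting the online blocks in a range:

#include <linux/memory.h>

static int count_online(struct memory_block *mem, void *arg)
{
	unsigned int *count = arg;

	if (mem->state == MEM_ONLINE)
		(*count)++;
	return 0;		/* nonzero would abort the walk */
}

static unsigned int online_blocks_in(unsigned long start, unsigned long size)
{
	unsigned int count = 0;

	walk_memory_blocks(start, size, &count, count_online);
	return count;
}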
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 8598fcbd2a17..75b7e6f6535b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -66,6 +66,7 @@ static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
* @dev: Device for this memory access class
* @list_node: List element in the node's access list
* @access: The access class rank
+ * @hmem_attrs: Heterogeneous memory performance attributes
*/
struct node_access_nodes {
struct device dev;
@@ -673,8 +674,8 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
/**
* register_memory_node_under_compute_node - link memory node to its compute
* node for a given access class.
- * @mem_node: Memory node number
- * @cpu_node: Cpu node number
+ * @mem_nid: Memory node number
+ * @cpu_nid: Cpu node number
* @access: Access class to register
*
* Description:
@@ -752,7 +753,8 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+static int register_mem_sect_under_node(struct memory_block *mem_blk,
+ void *arg)
{
int ret, nid = *(int *)arg;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
@@ -801,23 +803,18 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
return 0;
}
-/* unregister memory section under all nodes that it spans */
-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index)
+/*
+ * Unregister memory block device under all nodes that it spans.
+ * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
+ */
+void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
- NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
unsigned long pfn, sect_start_pfn, sect_end_pfn;
+ static nodemask_t unlinked_nodes;
- if (!mem_blk) {
- NODEMASK_FREE(unlinked_nodes);
- return -EFAULT;
- }
- if (!unlinked_nodes)
- return -ENOMEM;
- nodes_clear(*unlinked_nodes);
-
- sect_start_pfn = section_nr_to_pfn(phys_index);
- sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
+ nodes_clear(unlinked_nodes);
+ sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
int nid;
@@ -826,21 +823,20 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (!node_online(nid))
continue;
- if (node_test_and_set(nid, *unlinked_nodes))
+ if (node_test_and_set(nid, unlinked_nodes))
continue;
sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
sysfs_remove_link(&mem_blk->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
}
- NODEMASK_FREE(unlinked_nodes);
- return 0;
}
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
- return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
- register_mem_sect_under_node);
+ return walk_memory_blocks(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
+ register_mem_sect_under_node);
}
#ifdef CONFIG_HUGETLBFS
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4d1729853d1a..ec974ba9c0c4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -5,7 +5,7 @@
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
*
- * Please see Documentation/driver-model/platform.txt for more
+ * Please see Documentation/driver-api/driver-model/platform.rst for more
* information.
*/
@@ -157,8 +157,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
* the device will only expose one IRQ, and this fallback
* allows a common code path across either kind of resource.
*/
- if (num == 0 && has_acpi_companion(&dev->dev))
- return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+ if (num == 0 && has_acpi_companion(&dev->dev)) {
+ int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+
+ /* Our callers expect -ENXIO for missing IRQs. */
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
return -ENXIO;
#endif
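With the platform.c change, platform_get_irq() propagates -EPROBE_DEFER from the ACPI GPIO lookup but maps all other errors to -ENXIO, which callers already treat as "no such IRQ". A hedged sketch of a caller that falls back to polling when the IRQ is absent (get_optional_irq() is a made-up name):

#include <linux/platform_device.h>

static int get_optional_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq == -EPROBE_DEFER)
		return irq;	/* the IRQ provider is not ready yet */
	if (irq < 0)
		return 0;	/* -ENXIO: no IRQ wired up, use polling */
	return irq;
}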
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 33c30c1e6a30..b063bc41b0a9 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1536,7 +1536,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (ret)
genpd_free_dev_data(dev, gpd_data);
else
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
return ret;
}
@@ -1569,7 +1570,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
pdd = dev->power.subsys_data->domain_data;
gpd_data = to_gpd_data(pdd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
genpd_lock(genpd);
@@ -1597,7 +1599,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
out:
genpd_unlock(genpd);
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
return ret;
}
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 3838045c9277..daa8c7689f7e 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -33,7 +33,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
* take its current PM QoS constraint (that's the only thing
* known at this point anyway).
*/
- constraint_ns = dev_pm_qos_read_value(dev);
+ constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
constraint_ns *= NSEC_PER_USEC;
}
@@ -66,7 +66,7 @@ static bool default_suspend_ok(struct device *dev)
td->constraint_changed = false;
td->cached_suspend_ok = false;
td->effective_constraint_ns = 0;
- constraint_ns = __dev_pm_qos_read_value(dev);
+ constraint_ns = __dev_pm_qos_resume_latency(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 6c91f8df1d59..6c90fd7e2ff8 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -90,29 +90,49 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/**
- * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
* This routine must be called with dev->power.lock held.
*/
-s32 __dev_pm_qos_read_value(struct device *dev)
+s32 __dev_pm_qos_resume_latency(struct device *dev)
{
lockdep_assert_held(&dev->power.lock);
- return dev_pm_qos_raw_read_value(dev);
+ return dev_pm_qos_raw_resume_latency(dev);
}
/**
* dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
+ * @type: QoS request type.
*/
-s32 dev_pm_qos_read_value(struct device *dev)
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
+ struct dev_pm_qos *qos = dev->power.qos;
unsigned long flags;
s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
- ret = __dev_pm_qos_read_value(dev);
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ : pm_qos_read_value(&qos->resume_latency);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
+ : pm_qos_read_value(&qos->min_frequency);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
+ : pm_qos_read_value(&qos->max_frequency);
+ break;
+ default:
+ WARN_ON(1);
+ ret = 0;
+ }
+
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
@@ -149,6 +169,14 @@ static int apply_constraint(struct dev_pm_qos_request *req,
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = pm_qos_update_target(&qos->min_frequency,
+ &req->data.pnode, action, value);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = pm_qos_update_target(&qos->max_frequency,
+ &req->data.pnode, action, value);
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -177,12 +205,11 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
if (!qos)
return -ENOMEM;
- n = kzalloc(sizeof(*n), GFP_KERNEL);
+ n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
if (!n) {
kfree(qos);
return -ENOMEM;
}
- BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->resume_latency;
plist_head_init(&c->list);
@@ -191,6 +218,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->latency_tolerance;
plist_head_init(&c->list);
@@ -199,6 +227,24 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
+ c = &qos->min_frequency;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+ c->type = PM_QOS_MAX;
+ c->notifiers = ++n;
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+ c = &qos->max_frequency;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
+ c->type = PM_QOS_MIN;
+ c->notifiers = ++n;
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -252,11 +298,25 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+
c = &qos->latency_tolerance;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+
+ c = &qos->min_frequency;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->max_frequency;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -368,6 +428,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE:
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
@@ -467,6 +529,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
*
* @dev: target device for the constraint
* @notifier: notifier block managed by caller.
+ * @type: request type.
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device.
@@ -474,7 +537,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
* If the device's constraints object doesn't exist when this routine is called,
* it will be created (or error code will be returned if that fails).
*/
-int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{
int ret = 0;
@@ -485,10 +549,28 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
- if (!ret)
+ if (ret)
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
+ notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+unlock:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
@@ -500,24 +582,44 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
*
* @dev: target device for the constraint
* @notifier: notifier block to be removed.
+ * @type: request type.
*
* Will remove the notifier from the notification chain that gets called
* upon changes to the target value.
*/
int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{
- int retval = 0;
+ int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (!IS_ERR_OR_NULL(dev->power.qos))
- retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
- notifier);
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
+ notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+unlock:
mutex_unlock(&dev_pm_qos_mtx);
- return retval;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
@@ -577,6 +679,9 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
+ default:
+ WARN_ON(1);
+ return;
}
__dev_pm_qos_remove_request(req);
kfree(req);
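dev_pm_qos_add_notifier()/dev_pm_qos_remove_notifier() now take the request type, so a notifier subscribes to exactly one of the three chains allocated above. A hedged sketch registering for max-frequency constraint changes (all names are made up):

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

static int my_freq_notify(struct notifier_block *nb, unsigned long value,
			  void *unused)
{
	/* 'value' is the new effective DEV_PM_QOS_MAX_FREQUENCY target. */
	return NOTIFY_OK;
}

static struct notifier_block my_freq_nb = {
	.notifier_call = my_freq_notify,
};

static int subscribe(struct device *dev)
{
	return dev_pm_qos_add_notifier(dev, &my_freq_nb,
				       DEV_PM_QOS_MAX_FREQUENCY);
}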
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 952a1e7057c7..b75335508d2c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -275,7 +275,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
- else if (__dev_pm_qos_read_value(dev) == 0)
+ else if (__dev_pm_qos_resume_latency(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index a4984136c19d..0fd6f97ee523 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -44,7 +44,7 @@ config REGMAP_IRQ
config REGMAP_SOUNDWIRE
tristate
- depends on SOUNDWIRE_BUS
+ depends on SOUNDWIRE
config REGMAP_SCCB
tristate