Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c                      | 44
-rw-r--r--  drivers/base/component.c                          |  3
-rw-r--r--  drivers/base/core.c                               | 72
-rw-r--r--  drivers/base/cpu.c                                | 23
-rw-r--r--  drivers/base/dd.c                                 | 91
-rw-r--r--  drivers/base/firmware_loader/Makefile             |  1
-rw-r--r--  drivers/base/firmware_loader/fallback.c           |  2
-rw-r--r--  drivers/base/firmware_loader/fallback.h           | 10
-rw-r--r--  drivers/base/firmware_loader/fallback_platform.c  | 36
-rw-r--r--  drivers/base/firmware_loader/firmware.h           |  4
-rw-r--r--  drivers/base/firmware_loader/main.c               | 33
-rw-r--r--  drivers/base/memory.c                             | 23
-rw-r--r--  drivers/base/platform.c                           | 52
-rw-r--r--  drivers/base/power/domain.c                       |  2
-rw-r--r--  drivers/base/power/main.c                         | 12
-rw-r--r--  drivers/base/power/runtime.c                      | 36
-rw-r--r--  drivers/base/power/wakeup.c                       | 17
-rw-r--r--  drivers/base/property.c                           |  1
-rw-r--r--  drivers/base/swnode.c                             | 14
19 files changed, 304 insertions(+), 172 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 8d63673c1689..4d0a0038b476 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -106,7 +106,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
 	update_topology = 0;
 }
 
-static u32 capacity_scale;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;
 static u32 *raw_capacity;
 
 static int free_raw_capacity(void)
@@ -120,17 +120,23 @@ static int free_raw_capacity(void)
 void topology_normalize_cpu_scale(void)
 {
 	u64 capacity;
+	u64 capacity_scale;
 	int cpu;
 
 	if (!raw_capacity)
 		return;
 
-	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+	capacity_scale = 1;
 	for_each_possible_cpu(cpu) {
-		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
-			 cpu, raw_capacity[cpu]);
-		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
-			/ capacity_scale;
+		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+		capacity_scale = max(capacity, capacity_scale);
+	}
+
+	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
+	for_each_possible_cpu(cpu) {
+		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+				     capacity_scale);
 		topology_set_cpu_scale(cpu, capacity);
 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
 			 cpu, topology_get_cpu_scale(cpu));
@@ -139,6 +145,7 @@ void topology_normalize_cpu_scale(void)
 
 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 {
+	struct clk *cpu_clk;
 	static bool cap_parsing_failed;
 	int ret;
 	u32 cpu_capacity;
@@ -158,10 +165,22 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 			return false;
 		}
 	}
-	capacity_scale = max(cpu_capacity, capacity_scale);
 	raw_capacity[cpu] = cpu_capacity;
 	pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
 		 cpu_node, raw_capacity[cpu]);
+
+	/*
+	 * Update freq_factor for calculating early boot cpu capacities.
+	 * For non-clk CPU DVFS mechanism, there's no way to get the
+	 * frequency value now, assuming they are running at the same
+	 * frequency (by keeping the initial freq_factor value).
+	 */
+	cpu_clk = of_clk_get(cpu_node, 0);
+	if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+		per_cpu(freq_factor, cpu) =
+			clk_get_rate(cpu_clk) / 1000;
+		clk_put(cpu_clk);
+	}
 } else {
 	if (raw_capacity) {
 		pr_err("cpu_capacity: missing %pOF raw capacity\n",
@@ -200,11 +219,8 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	cpumask_andnot(cpus_to_visit, cpus_to_visit,
 		       policy->related_cpus);
 
-	for_each_cpu(cpu, policy->related_cpus) {
-		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
-				    policy->cpuinfo.max_freq / 1000UL;
-		capacity_scale = max(raw_capacity[cpu], capacity_scale);
-	}
+	for_each_cpu(cpu, policy->related_cpus)
+		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
 
 	if (cpumask_empty(cpus_to_visit)) {
 		topology_normalize_cpu_scale();
@@ -293,7 +309,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 static int __init parse_core(struct device_node *core, int package_id,
 			     int core_id)
 {
-	char name[10];
+	char name[20];
 	bool leaf = true;
 	int i = 0;
 	int cpu;
@@ -339,7 +355,7 @@ static int __init parse_core(struct device_node *core, int package_id,
 
 static int __init parse_cluster(struct device_node *cluster, int depth)
 {
-	char name[10];
+	char name[20];
 	bool leaf = true;
 	bool has_cores = false;
 	struct device_node *c;
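The arch_topology change above replaces the global capacity_scale with a per-CPU freq_factor, so a CPU's capacity becomes raw_capacity * freq_factor, normalized in a second pass against the largest such product. A minimal userspace C sketch of that two-pass arithmetic follows; the sample values are illustrative, not kernel API:

```c
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10 /* capacities are scaled to 1024 */

int main(void)
{
	/* illustrative capacity-dmips-mhz values and freq factors (kHz/1000) */
	uint64_t raw_capacity[4] = { 578, 578, 1024, 1024 };
	uint64_t freq_factor[4]  = { 1800, 1800, 2400, 2400 };
	uint64_t scale = 1;
	int cpu;

	/* pass 1: find the largest raw_capacity * freq_factor product */
	for (cpu = 0; cpu < 4; cpu++) {
		uint64_t c = raw_capacity[cpu] * freq_factor[cpu];
		if (c > scale)
			scale = c;
	}

	/* pass 2: normalize so the fastest CPU reports 1024 */
	for (cpu = 0; cpu < 4; cpu++) {
		uint64_t c = raw_capacity[cpu] * freq_factor[cpu];
		printf("CPU%d capacity=%llu\n", cpu,
		       (unsigned long long)((c << SCHED_CAPACITY_SHIFT) / scale));
	}
	return 0;
}
```

Keeping freq_factor at its initial value of 1 for CPUs without a clock reduces the formula to the old raw-capacity-only scaling.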
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c7879f5ae2fb..e97704104784 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -528,7 +528,8 @@ static void component_unbind(struct component *component,
 {
 	WARN_ON(!component->bound);
 
-	component->ops->unbind(component->dev, master->dev, data);
+	if (component->ops && component->ops->unbind)
+		component->ops->unbind(component->dev, master->dev, data);
 	component->bound = false;
 
 	/* Release all resources claimed in the binding of this component */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 42a672456432..5e3cc1651c78 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -64,12 +64,12 @@ static inline void device_links_write_unlock(void)
 	mutex_unlock(&device_links_lock);
 }
 
-int device_links_read_lock(void)
+int device_links_read_lock(void) __acquires(&device_links_srcu)
 {
 	return srcu_read_lock(&device_links_srcu);
 }
 
-void device_links_read_unlock(int idx)
+void device_links_read_unlock(int idx) __releases(&device_links_srcu)
 {
 	srcu_read_unlock(&device_links_srcu, idx);
 }
@@ -523,9 +523,13 @@ static void device_link_add_missing_supplier_links(void)
 
 	mutex_lock(&wfs_lock);
 	list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
-				 links.needs_suppliers)
-		if (!fwnode_call_int_op(dev->fwnode, add_links, dev))
+				 links.needs_suppliers) {
+		int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+		if (!ret)
 			list_del_init(&dev->links.needs_suppliers);
+		else if (ret != -ENODEV)
+			dev->links.need_for_probe = false;
+	}
 	mutex_unlock(&wfs_lock);
 }
 
@@ -718,6 +722,8 @@ static void __device_links_queue_sync_state(struct device *dev,
 {
 	struct device_link *link;
 
+	if (!dev_has_sync_state(dev))
+		return;
 	if (dev->state_synced)
 		return;
 
@@ -745,25 +751,31 @@ static void __device_links_queue_sync_state(struct device *dev,
 /**
  * device_links_flush_sync_list - Call sync_state() on a list of devices
  * @list: List of devices to call sync_state() on
+ * @dont_lock_dev: Device for which lock is already held by the caller
  *
  * Calls sync_state() on all the devices that have been queued for it. This
- * function is used in conjunction with __device_links_queue_sync_state().
+ * function is used in conjunction with __device_links_queue_sync_state(). The
+ * @dont_lock_dev parameter is useful when this function is called from a
+ * context where a device lock is already held.
  */
-static void device_links_flush_sync_list(struct list_head *list)
+static void device_links_flush_sync_list(struct list_head *list,
+					 struct device *dont_lock_dev)
 {
 	struct device *dev, *tmp;
 
 	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
 		list_del_init(&dev->links.defer_sync);
 
-		device_lock(dev);
+		if (dev != dont_lock_dev)
+			device_lock(dev);
 
 		if (dev->bus->sync_state)
 			dev->bus->sync_state(dev);
 		else if (dev->driver && dev->driver->sync_state)
 			dev->driver->sync_state(dev);
 
-		device_unlock(dev);
+		if (dev != dont_lock_dev)
+			device_unlock(dev);
 
 		put_device(dev);
 	}
@@ -801,7 +813,7 @@ void device_links_supplier_sync_state_resume(void)
 out:
 	device_links_write_unlock();
 
-	device_links_flush_sync_list(&sync_list);
+	device_links_flush_sync_list(&sync_list, NULL);
 }
 
 static int sync_state_resume_initcall(void)
@@ -813,7 +825,7 @@ late_initcall(sync_state_resume_initcall);
 
 static void __device_links_supplier_defer_sync(struct device *sup)
 {
-	if (list_empty(&sup->links.defer_sync))
+	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
 		list_add_tail(&sup->links.defer_sync, &deferred_sync);
 }
 
@@ -865,6 +877,11 @@ void device_links_driver_bound(struct device *dev)
 			driver_deferred_probe_add(link->consumer);
 	}
 
+	if (defer_sync_state_count)
+		__device_links_supplier_defer_sync(dev);
+	else
+		__device_links_queue_sync_state(dev, &sync_list);
+
 	list_for_each_entry(link, &dev->links.suppliers, c_node) {
 		if (!(link->flags & DL_FLAG_MANAGED))
 			continue;
@@ -883,7 +900,7 @@ void device_links_driver_bound(struct device *dev)
 
 	device_links_write_unlock();
 
-	device_links_flush_sync_list(&sync_list);
+	device_links_flush_sync_list(&sync_list, dev);
 }
 
 static void device_link_drop_managed(struct device_link *link)
@@ -2328,6 +2345,31 @@ static int device_private_init(struct device *dev)
 	return 0;
 }
 
+static u32 fw_devlink_flags;
+static int __init fw_devlink_setup(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (strcmp(arg, "off") == 0) {
+		fw_devlink_flags = 0;
+	} else if (strcmp(arg, "permissive") == 0) {
+		fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+	} else if (strcmp(arg, "on") == 0) {
+		fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+	} else if (strcmp(arg, "rpm") == 0) {
+		fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
+				   DL_FLAG_PM_RUNTIME;
+	}
+	return 0;
+}
+early_param("fw_devlink", fw_devlink_setup);
+
+u32 fw_devlink_get_flags(void)
+{
+	return fw_devlink_flags;
+}
+
 /**
  * device_add - add device to device hierarchy.
  * @dev: device.
@@ -2362,6 +2404,7 @@ int device_add(struct device *dev)
 	struct class_interface *class_intf;
 	int error = -EINVAL, fw_ret;
 	struct kobject *glue_dir = NULL;
+	bool is_fwnode_dev = false;
 
 	dev = get_device(dev);
 	if (!dev)
@@ -2459,8 +2502,10 @@ int device_add(struct device *dev)
 
 	kobject_uevent(&dev->kobj, KOBJ_ADD);
 
-	if (dev->fwnode && !dev->fwnode->dev)
+	if (dev->fwnode && !dev->fwnode->dev) {
 		dev->fwnode->dev = dev;
+		is_fwnode_dev = true;
+	}
 
 	/*
	 * Check if any of the other devices (consumers) have been waiting for
@@ -2476,7 +2521,8 @@ int device_add(struct device *dev)
 	 */
 	device_link_add_missing_supplier_links();
 
-	if (fwnode_has_op(dev->fwnode, add_links)) {
+	if (fw_devlink_flags && is_fwnode_dev &&
+	    fwnode_has_op(dev->fwnode, add_links)) {
 		fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
 		if (fw_ret == -ENODEV)
 			device_link_wait_for_mandatory_supplier(dev);
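The new fw_devlink= early parameter (off, permissive, on, rpm) selects the device-link flags that firmware code can query through fw_devlink_get_flags() when implementing the fwnode add_links operation. Below is a hedged sketch of how such an implementation might consume the flags; my_fw_parse_supplier() is a hypothetical helper, while device_link_add() and the returned flag values are the real API:

```c
/* Hypothetical ->add_links() sketch; my_fw_parse_supplier() is invented. */
static int my_fw_add_links(const struct fwnode_handle *fwnode,
			   struct device *dev)
{
	struct device *sup = my_fw_parse_supplier(fwnode);

	if (!sup)
		return -ENODEV;	/* mandatory supplier not added yet */

	/* fw_devlink=off leaves the flags at 0, so no link is created */
	if (fw_devlink_get_flags())
		device_link_add(dev, sup, fw_devlink_get_flags());

	return 0;
}
```

Note how device_add() above only calls add_links when fw_devlink_flags is non-zero, so booting with fw_devlink=off skips firmware-derived links entirely.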
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 6265871a4af2..9a1c00fbbaef 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -55,7 +55,7 @@ static int cpu_subsys_online(struct device *dev)
 	if (from_nid == NUMA_NO_NODE)
 		return -ENODEV;
 
-	ret = cpu_up(cpuid);
+	ret = cpu_device_up(dev);
 	/*
	 * When hot adding memory to memoryless node and enabling a cpu
	 * on the node, node number of the cpu may internally change.
@@ -69,7 +69,7 @@ static int cpu_subsys_online(struct device *dev)
 
 static int cpu_subsys_offline(struct device *dev)
 {
-	return cpu_down(dev->id);
+	return cpu_device_down(dev);
 }
 
 void unregister_cpu(struct cpu *cpu)
@@ -231,8 +231,7 @@ static struct cpu_attr cpu_attrs[] = {
 static ssize_t print_cpus_kernel_max(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
-	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
-	return n;
+	return sprintf(buf, "%d\n", NR_CPUS - 1);
 }
 static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
 
@@ -258,13 +257,13 @@ static ssize_t print_cpus_offline(struct device *dev,
 			buf[n++] = ',';
 
 		if (nr_cpu_ids == total_cpus-1)
-			n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
+			n += scnprintf(&buf[n], len - n, "%u", nr_cpu_ids);
 		else
-			n += snprintf(&buf[n], len - n, "%u-%d",
+			n += scnprintf(&buf[n], len - n, "%u-%d",
 				      nr_cpu_ids, total_cpus-1);
 	}
 
-	n += snprintf(&buf[n], len - n, "\n");
+	n += scnprintf(&buf[n], len - n, "\n");
 	return n;
 }
 static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
@@ -272,7 +271,7 @@ static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
 static ssize_t print_cpus_isolated(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	int n = 0, len = PAGE_SIZE-2;
+	int n;
 	cpumask_var_t isolated;
 
 	if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
@@ -280,7 +279,7 @@ static ssize_t print_cpus_isolated(struct device *dev,
 	cpumask_andnot(isolated, cpu_possible_mask,
 		       housekeeping_cpumask(HK_FLAG_DOMAIN));
-	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));
+	n = sprintf(buf, "%*pbl\n", cpumask_pr_args(isolated));
 
 	free_cpumask_var(isolated);
 
@@ -292,11 +291,7 @@ static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
 static ssize_t print_cpus_nohz_full(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
-	int n = 0, len = PAGE_SIZE-2;
-
-	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
-
-	return n;
+	return sprintf(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
 }
 static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
 #endif
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b25bcab2a26b..06ec0e851fa1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -224,76 +224,52 @@ static int deferred_devs_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(deferred_devs);
 
-static int deferred_probe_timeout = -1;
+#ifdef CONFIG_MODULES
+/*
+ * In the case of modules, set the default probe timeout to
+ * 30 seconds to give userland some time to load needed modules
+ */
+int driver_deferred_probe_timeout = 30;
+#else
+/* In the case of !modules, no probe timeout needed */
+int driver_deferred_probe_timeout = -1;
+#endif
+EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
+
 static int __init deferred_probe_timeout_setup(char *str)
 {
 	int timeout;
 
 	if (!kstrtoint(str, 10, &timeout))
-		deferred_probe_timeout = timeout;
+		driver_deferred_probe_timeout = timeout;
 	return 1;
 }
 __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
 
-static int __driver_deferred_probe_check_state(struct device *dev)
-{
-	if (!initcalls_done)
-		return -EPROBE_DEFER;
-
-	if (!deferred_probe_timeout) {
-		dev_WARN(dev, "deferred probe timeout, ignoring dependency");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
 /**
  * driver_deferred_probe_check_state() - Check deferred probe state
  * @dev: device to check
  *
- * Returns -ENODEV if init is done and all built-in drivers have had a chance
- * to probe (i.e. initcalls are done), -ETIMEDOUT if deferred probe debug
- * timeout has expired, or -EPROBE_DEFER if none of those conditions are met.
+ * Return:
+ * -ENODEV if initcalls have completed and modules are disabled.
+ * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * -EPROBE_DEFER in other cases.
  *
  * Drivers or subsystems can opt-in to calling this function instead of directly
  * returning -EPROBE_DEFER.
  */
 int driver_deferred_probe_check_state(struct device *dev)
 {
-	int ret;
-
-	ret = __driver_deferred_probe_check_state(dev);
-	if (ret < 0)
-		return ret;
-
-	dev_warn(dev, "ignoring dependency for device, assuming no driver");
-
-	return -ENODEV;
-}
-
-/**
- * driver_deferred_probe_check_state_continue() - check deferred probe state
- * @dev: device to check
- *
- * Returns -ETIMEDOUT if deferred probe debug timeout has expired, or
- * -EPROBE_DEFER otherwise.
- *
- * Drivers or subsystems can opt-in to calling this function instead of
- * directly returning -EPROBE_DEFER.
- *
- * This is similar to driver_deferred_probe_check_state(), but it allows the
- * subsystem to keep deferring probe after built-in drivers have had a chance
- * to probe. One scenario where that is useful is if built-in drivers rely on
- * resources that are provided by modular drivers.
- */
-int driver_deferred_probe_check_state_continue(struct device *dev)
-{
-	int ret;
+	if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
+		dev_warn(dev, "ignoring dependency for device, assuming no driver");
+		return -ENODEV;
+	}
 
-	ret = __driver_deferred_probe_check_state(dev);
-	if (ret < 0)
-		return ret;
+	if (!driver_deferred_probe_timeout) {
+		dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+		return -ETIMEDOUT;
+	}
 
 	return -EPROBE_DEFER;
 }
@@ -302,7 +278,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
 	struct device_private *private, *p;
 
-	deferred_probe_timeout = 0;
+	driver_deferred_probe_timeout = 0;
 	driver_deferred_probe_trigger();
 	flush_work(&deferred_probe_work);
 
@@ -336,9 +312,9 @@ static int deferred_probe_initcall(void)
 	driver_deferred_probe_trigger();
 	flush_work(&deferred_probe_work);
 
-	if (deferred_probe_timeout > 0) {
+	if (driver_deferred_probe_timeout > 0) {
 		schedule_delayed_work(&deferred_probe_timeout_work,
-				      deferred_probe_timeout * HZ);
+				      driver_deferred_probe_timeout * HZ);
 	}
 	return 0;
 }
@@ -668,9 +644,10 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
  */
 int driver_probe_done(void)
 {
-	pr_debug("%s: probe_count = %d\n", __func__,
-		 atomic_read(&probe_count));
-	if (atomic_read(&probe_count))
+	int local_probe_count = atomic_read(&probe_count);
+
+	pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
+	if (local_probe_count)
 		return -EBUSY;
 	return 0;
 }
@@ -1222,7 +1199,7 @@ void driver_detach(struct device_driver *drv)
 			spin_unlock(&drv->p->klist_devices.k_lock);
 			break;
 		}
-		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
+		dev_prv = list_last_entry(&drv->p->klist_devices.k_list,
 				     struct device_private,
 				     knode_driver.n_node);
 		dev = dev_prv->device;
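The dd.c rework folds the old _continue() variant into a single driver_deferred_probe_check_state() whose behaviour depends on CONFIG_MODULES and the now-exported driver_deferred_probe_timeout (default 30 s with modules enabled). A hedged sketch of the call-site pattern in a consumer's probe path; the optional clock lookup is illustrative:

```c
/* Sketch of a probe that defers on a missing supplier, with timeout. */
static int my_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			/*
			 * Keeps returning -EPROBE_DEFER while waiting, but
			 * gives up with -ETIMEDOUT once the (now exported)
			 * driver_deferred_probe_timeout expires.
			 */
			return driver_deferred_probe_check_state(&pdev->dev);
		return PTR_ERR(clk);
	}
	return 0;
}
```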
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
index 0b2dfa6259c9..e87843408fe6 100644
--- a/drivers/base/firmware_loader/Makefile
+++ b/drivers/base/firmware_loader/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o
 obj-$(CONFIG_FW_LOADER) += firmware_class.o
 firmware_class-objs := main.o
 firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
+firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o
 
 obj-y += builtin/
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 8704e1bae175..1e9c96e3ed63 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -525,7 +525,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
 	}
 
 	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
-	if (retval < 0) {
+	if (retval < 0 && retval != -ENOENT) {
 		mutex_lock(&fw_lock);
 		fw_load_abort(fw_sysfs);
 		mutex_unlock(&fw_lock);
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 21063503e4ea..06f4577733a8 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -66,4 +66,14 @@ static inline void unregister_sysfs_loader(void)
 }
 #endif /* CONFIG_FW_LOADER_USER_HELPER */
 
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags);
+#else
+static inline int firmware_fallback_platform(struct fw_priv *fw_priv,
+					     enum fw_opt opt_flags)
+{
+	return -ENOENT;
+}
+#endif
+
 #endif /* __FIRMWARE_FALLBACK_H */
diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
new file mode 100644
index 000000000000..c88c745590fe
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback_platform.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi_embedded_fw.h>
+#include <linux/property.h>
+#include <linux/security.h>
+#include <linux/vmalloc.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags)
+{
+	const u8 *data;
+	size_t size;
+	int rc;
+
+	if (!(opt_flags & FW_OPT_FALLBACK_PLATFORM))
+		return -ENOENT;
+
+	rc = security_kernel_load_data(LOADING_FIRMWARE_EFI_EMBEDDED);
+	if (rc)
+		return rc;
+
+	rc = efi_get_embedded_fw(fw_priv->fw_name, &data, &size);
+	if (rc)
+		return rc; /* rc == -ENOENT when the fw was not found */
+
+	fw_priv->data = vmalloc(size);
+	if (!fw_priv->data)
+		return -ENOMEM;
+
+	memcpy(fw_priv->data, data, size);
+	fw_priv->size = size;
+	fw_state_done(fw_priv);
+	return 0;
+}
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 8656e5239a80..25836a6afc9f 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -29,6 +29,9 @@
  *	firmware caching mechanism.
  * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
  *	precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
+ * @FW_OPT_FALLBACK_PLATFORM: Enable fallback to device fw copy embedded in
+ *	the platform's main firmware. If both this fallback and the sysfs
+ *	fallback are enabled, then this fallback will be tried first.
  */
 enum fw_opt {
 	FW_OPT_UEVENT			= BIT(0),
@@ -37,6 +40,7 @@ enum fw_opt {
 	FW_OPT_NO_WARN			= BIT(3),
 	FW_OPT_NOCACHE			= BIT(4),
 	FW_OPT_NOFALLBACK_SYSFS		= BIT(5),
+	FW_OPT_FALLBACK_PLATFORM	= BIT(6),
 };
 
 enum fw_status {
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 57133a9dad09..76f79913916d 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -493,8 +493,10 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
 		}
 
 		fw_priv->size = 0;
-		rc = kernel_read_file_from_path(path, &buffer, &size,
-						msize, id);
+
+		/* load firmware files from the mount namespace of init */
+		rc = kernel_read_file_from_path_initns(path, &buffer,
+						       &size, msize, id);
 		if (rc) {
 			if (rc != -ENOENT)
 				dev_warn(device, "loading %s failed with error %d\n",
@@ -776,6 +778,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 						  fw_decompress_xz);
 #endif
 
+	if (ret == -ENOENT)
+		ret = firmware_fallback_platform(fw->priv, opt_flags);
+
 	if (ret) {
 		if (!(opt_flags & FW_OPT_NO_WARN))
 			dev_warn(device,
@@ -884,6 +889,30 @@ int request_firmware_direct(const struct firmware **firmware_p,
 EXPORT_SYMBOL_GPL(request_firmware_direct);
 
 /**
+ * firmware_request_platform() - request firmware with platform-fw fallback
+ * @firmware: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function is similar in behaviour to request_firmware, except that if
+ * direct filesystem lookup fails, it will fall back to looking for a copy of
+ * the requested firmware embedded in the platform's main (e.g. UEFI) firmware.
+ **/
+int firmware_request_platform(const struct firmware **firmware,
+			      const char *name, struct device *device)
+{
+	int ret;
+
+	/* Need to pin this module until return */
+	__module_get(THIS_MODULE);
+	ret = _request_firmware(firmware, name, device, NULL, 0,
+				FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
+	module_put(THIS_MODULE);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_platform);
+
+/**
  * firmware_request_cache() - cache firmware for suspend so resume can use it
  * @name: name of firmware file
  * @device: device for which firmware should be cached for
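firmware_request_platform() is the new entry point that opts into FW_OPT_FALLBACK_PLATFORM. A hedged sketch of a driver using it; the firmware name and upload step are placeholders:

```c
/* Sketch: load device firmware, falling back to an EFI-embedded copy. */
static int my_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* tries /lib/firmware first, then the platform (EFI) fallback */
	ret = firmware_request_platform(&fw, "vendor/mydev.fw", dev);
	if (ret)
		return ret;

	/* ... upload fw->data / fw->size to the hardware ... */

	release_firmware(fw);
	return 0;
}
```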
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b9f474c11393..4086718f6876 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -97,30 +97,13 @@ static ssize_t phys_index_show(struct device *dev,
 }
 
 /*
- * Show whether the memory block is likely to be offlineable (or is already
- * offline). Once offline, the memory block could be removed. The return
- * value does, however, not indicate that there is a way to remove the
- * memory block.
+ * Legacy interface that we cannot remove. Always indicate "removable"
+ * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
  */
 static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
 			      char *buf)
 {
-	struct memory_block *mem = to_memory_block(dev);
-	unsigned long pfn;
-	int ret = 1, i;
-
-	if (mem->state != MEM_ONLINE)
-		goto out;
-
-	for (i = 0; i < sections_per_block; i++) {
-		if (!present_section_nr(mem->start_section_nr + i))
-			continue;
-		pfn = section_nr_to_pfn(mem->start_section_nr + i);
-		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
-	}
-
-out:
-	return sprintf(buf, "%d\n", ret);
+	return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
 }
 
 /*
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7fa654f1288b..5255550b7c34 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -63,6 +63,28 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
 #ifdef CONFIG_HAS_IOMEM
 /**
+ * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
+ *					    platform device and get resource
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ *	  resource management
+ * @index: resource index
+ * @res: optional output parameter to store a pointer to the obtained resource.
+ */
+void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+				unsigned int index, struct resource **res)
+{
+	struct resource *r;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
+	if (res)
+		*res = r;
+	return devm_ioremap_resource(&pdev->dev, r);
+}
+EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
+
+/**
  * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
  *				    device
  *
@@ -73,10 +95,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
 					     unsigned int index)
 {
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
-	return devm_ioremap_resource(&pdev->dev, res);
+	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
 }
 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
@@ -363,10 +382,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
 {
 	if (!pdev->dev.coherent_dma_mask)
 		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	if (!pdev->dma_mask)
-		pdev->dma_mask = DMA_BIT_MASK(32);
-	if (!pdev->dev.dma_mask)
-		pdev->dev.dma_mask = &pdev->dma_mask;
+	if (!pdev->dev.dma_mask) {
+		pdev->platform_dma_mask = DMA_BIT_MASK(32);
+		pdev->dev.dma_mask = &pdev->platform_dma_mask;
+	}
 };
@@ -662,20 +681,8 @@ struct platform_device *platform_device_register_full(
 	pdev->dev.of_node_reused = pdevinfo->of_node_reused;
 
 	if (pdevinfo->dma_mask) {
-		/*
-		 * This memory isn't freed when the device is put,
-		 * I don't have a nice idea for that though. Conceptually
-		 * dma_mask in struct device should not be a pointer.
-		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
-		 */
-		pdev->dev.dma_mask =
-			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
-		if (!pdev->dev.dma_mask)
-			goto err;
-
-		kmemleak_ignore(pdev->dev.dma_mask);
-
-		*pdev->dev.dma_mask = pdevinfo->dma_mask;
+		pdev->platform_dma_mask = pdevinfo->dma_mask;
+		pdev->dev.dma_mask = &pdev->platform_dma_mask;
 		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
 	}
@@ -700,7 +707,6 @@ struct platform_device *platform_device_register_full(
 	if (ret) {
 err:
 		ACPI_COMPANION_SET(&pdev->dev, NULL);
-		kfree(pdev->dev.dma_mask);
 		platform_device_put(pdev);
 		return ERR_PTR(ret);
 	}
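devm_platform_get_and_ioremap_resource() is for drivers that need both the ioremapped base and the underlying struct resource (for resource_size(), logging, and the like). A hedged probe sketch; the dev_info() use of the resource is illustrative:

```c
/* Sketch: map the first MEM resource and keep the resource pointer. */
static int my_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_info(&pdev->dev, "mapped %pR (%llu bytes)\n", res,
		 (unsigned long long)resource_size(res));
	return 0;
}
```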
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 959d6d5eb000..0a01df608849 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2653,7 +2653,7 @@ static int genpd_iterate_idle_states(struct device_node *dn,
 
 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
 	if (ret <= 0)
-		return ret;
+		return ret == -ENOENT ? 0 : ret;
 
 	/* Loop over the phandles until all the requested entry is found */
 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0e99a760aebd..6d1dee7051eb 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,6 +40,10 @@
 
 typedef int (*pm_callback_t)(struct device *);
 
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+	list_for_each_entry_rcu(pos, head, member, \
+			device_links_read_lock_held())
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -266,7 +270,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->supplier, async);
 
@@ -323,7 +327,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
-	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->consumer, async);
 
@@ -1235,7 +1239,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		link->supplier->power.must_resume = true;
 
 	device_links_read_unlock(idx);
@@ -1695,7 +1699,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
 		spin_lock_irq(&link->supplier->power.lock);
 		link->supplier->power.direct_complete = false;
 		spin_unlock_irq(&link->supplier->power.lock);
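The list_for_each_entry_rcu_locked() wrapper simply fills in the optional lockdep-condition argument that list_for_each_entry_rcu() accepts, asserting that the device-links SRCU read lock is held during the walk. A hedged sketch of the underlying pattern; the loop body is illustrative:

```c
/* Sketch: walk a device's supplier links under the device-links SRCU lock. */
static void walk_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		dev_dbg(dev, "supplier: %s\n", dev_name(link->supplier));
	device_links_read_unlock(idx);
}
```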
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 16134a69bf6f..99c7da112c95 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1087,29 +1087,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
  * @dev: Device to handle.
  *
  * Return -EINVAL if runtime PM is disabled for the device.
  *
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
+ * ign_usage_count is true or the device's usage_count is non-zero, increment
+ * the counter and return 1. Otherwise return 0 without changing the counter.
+ *
+ * If ign_usage_count is true, the function can be used to prevent suspending
+ * the device when its runtime PM status is RPM_ACTIVE.
+ *
+ * If ign_usage_count is false, the function can be used to prevent suspending
+ * the device when both its runtime PM status is RPM_ACTIVE and its usage_count
+ * is non-zero.
+ *
+ * The caller is responsible for putting the device's usage count when the
+ * return value is greater than zero.
  */
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
 {
 	unsigned long flags;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = dev->power.disable_depth > 0 ? -EINVAL :
-		dev->power.runtime_status == RPM_ACTIVE
-			&& atomic_inc_not_zero(&dev->power.usage_count);
+	if (dev->power.disable_depth > 0) {
+		retval = -EINVAL;
+	} else if (dev->power.runtime_status != RPM_ACTIVE) {
+		retval = 0;
+	} else if (ign_usage_count) {
+		retval = 1;
+		atomic_inc(&dev->power.usage_count);
+	} else {
+		retval = atomic_inc_not_zero(&dev->power.usage_count);
+	}
 	trace_rpm_usage_rcuidle(dev, 0);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
+
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
 
 /**
  * __pm_runtime_set_status - Set runtime PM status of a device.
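pm_runtime_get_if_active() generalizes pm_runtime_get_if_in_use(): with ign_usage_count == true it takes a reference whenever the status is RPM_ACTIVE, even if usage_count is zero. A hedged sketch of a caller; my_flush_hw() is a placeholder for real hardware access:

```c
/* Sketch: touch the hardware only if it is currently powered. */
static void my_flush_if_powered(struct device *dev)
{
	/* true: ignore usage_count, only the RPM_ACTIVE status matters */
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;	/* disabled (-EINVAL) or suspended (0): nothing to do */

	my_flush_hw(dev);	/* hypothetical hardware access */

	pm_runtime_put(dev);	/* drop the reference taken on return > 0 */
}
```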
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 27f3e60608e5..92073ac68473 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@ suspend_state_t pm_suspend_target_state;
 #define pm_suspend_target_state	(PM_SUSPEND_ON)
 #endif
 
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+	list_for_each_entry_rcu(pos, head, member, \
+		srcu_read_lock_held(&wakeup_srcu))
 /*
  * If set, the suspend/hibernate code will abort transitions to a sleep state
  * if wakeup events are registered during or immediately before the transition.
@@ -241,7 +244,9 @@ void wakeup_source_unregister(struct wakeup_source *ws)
 {
 	if (ws) {
 		wakeup_source_remove(ws);
-		wakeup_source_sysfs_remove(ws);
+		if (ws->dev)
+			wakeup_source_sysfs_remove(ws);
+
 		wakeup_source_destroy(ws);
 	}
 }
@@ -405,7 +410,7 @@ void device_wakeup_arm_wake_irqs(void)
 	int srcuidx;
 
 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 		dev_pm_arm_wake_irq(ws->wakeirq);
 	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
@@ -421,7 +426,7 @@ void device_wakeup_disarm_wake_irqs(void)
 	int srcuidx;
 
 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 		dev_pm_disarm_wake_irq(ws->wakeirq);
 	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
@@ -874,7 +879,7 @@ void pm_print_active_wakeup_sources(void)
 	struct wakeup_source *last_activity_ws = NULL;
 
 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 		if (ws->active) {
 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 			active = 1;
@@ -1025,7 +1030,7 @@ void pm_wakep_autosleep_enabled(bool set)
 	int srcuidx;
 
 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 		spin_lock_irq(&ws->lock);
 		if (ws->autosleep_enabled != set) {
 			ws->autosleep_enabled = set;
@@ -1104,7 +1109,7 @@ static void *wakeup_sources_stats_seq_start(struct seq_file *m,
 	}
 
 	*srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 		if (n-- <= 0)
 			return ws;
 	}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 511f6d7acdfe..5f35c0ccf5e0 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -566,6 +566,7 @@ const char *fwnode_get_name(const struct fwnode_handle *fwnode)
 {
 	return fwnode_call_ptr_op(fwnode, get_name);
 }
+EXPORT_SYMBOL_GPL(fwnode_get_name);
 
 /**
  * fwnode_get_name_prefix - Return the prefix of node for printing purposes
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 0b081dee1e95..de8d3543e8fe 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -608,6 +608,13 @@ static void software_node_release(struct kobject *kobj)
 {
 	struct swnode *swnode = kobj_to_swnode(kobj);
 
+	if (swnode->parent) {
+		ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+		list_del(&swnode->entry);
+	} else {
+		ida_simple_remove(&swnode_root_ids, swnode->id);
+	}
+
 	if (swnode->allocated) {
 		property_entries_free(swnode->node->properties);
 		kfree(swnode->node);
@@ -773,13 +780,6 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode)
 	if (!swnode)
 		return;
 
-	if (swnode->parent) {
-		ida_simple_remove(&swnode->parent->child_ids, swnode->id);
-		list_del(&swnode->entry);
-	} else {
-		ida_simple_remove(&swnode_root_ids, swnode->id);
-	}
-
 	kobject_put(&swnode->kobj);
 }
 EXPORT_SYMBOL_GPL(fwnode_remove_software_node);