Diffstat (limited to 'drivers/base')
28 files changed, 1370 insertions, 433 deletions
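Illustration (not part of the patch): the new drivers/base/arch_topology.c code below registers a per-CPU cpu_capacity attribute via device_create_file(), which on a typical system shows up under /sys/devices/system/cpu/cpuN/. The sketch assumes the usual sysfs mount point and that CONFIG_GENERIC_ARCH_TOPOLOGY is enabled; the path and the 0..1024 (SCHED_CAPACITY_SCALE) value range follow from the diff, everything else is ordinary userspace C.

	#include <stdio.h>

	/*
	 * Read the cpu_capacity value exposed by the new sysfs attribute.
	 * Returns the capacity (0..1024) or -1 if the CPU or attribute
	 * does not exist. Writes (not shown) require root and, as in
	 * cpu_capacity_store() in the diff, apply to all core siblings.
	 */
	static long read_capacity(int cpu)
	{
		char path[64];
		FILE *f;
		long cap = -1;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/cpu_capacity", cpu);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%ld", &cap) != 1)
			cap = -1;
		fclose(f);
		return cap;
	}

	int main(void)
	{
		int cpu;

		for (cpu = 0; ; cpu++) {
			long cap = read_capacity(cpu);

			if (cap < 0)
				break;	/* no more CPUs, or attribute missing */
			printf("cpu%d: capacity %ld\n", cpu, cap);
		}
		return 0;
	}

On asymmetric systems (e.g. big.LITTLE) the printed values differ between clusters once the capacity-dmips-mhz DT parsing and cpufreq-based normalization in the diff have run; on SMP or ACPI systems every CPU reports the default 1024.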
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index d718ae4b907a..f046d21de57d 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -339,4 +339,12 @@ config CMA_ALIGNMENT endif +config GENERIC_ARCH_TOPOLOGY + bool + help + Enable support for architectures common topology code: e.g., parsing + CPU capacity information from DT, usage of such information for + appropriate scaling, sysfs interface for changing capacity values at + runtime. + endmenu diff --git a/drivers/base/Makefile b/drivers/base/Makefile index f2816f6ff76a..397e5c344e6a 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_SOC_BUS) += soc.o obj-$(CONFIG_PINCTRL) += pinctrl.o obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o +obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o obj-y += test/ diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c new file mode 100644 index 000000000000..d1c33a85059e --- /dev/null +++ b/drivers/base/arch_topology.c @@ -0,0 +1,243 @@ +/* + * Arch specific cpu topology information + * + * Copyright (C) 2016, ARM Ltd. + * Written by: Juri Lelli, ARM Ltd. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Released under the GPLv2 only. + * SPDX-License-Identifier: GPL-2.0 + */ + +#include <linux/acpi.h> +#include <linux/arch_topology.h> +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/sched/topology.h> + +static DEFINE_MUTEX(cpu_scale_mutex); +static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; + +unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) +{ + per_cpu(cpu_scale, cpu) = capacity; +} + +static ssize_t cpu_capacity_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + + return sprintf(buf, "%lu\n", + topology_get_cpu_scale(NULL, cpu->dev.id)); +} + +static ssize_t cpu_capacity_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + int this_cpu = cpu->dev.id; + int i; + unsigned long new_capacity; + ssize_t ret; + + if (!count) + return 0; + + ret = kstrtoul(buf, 0, &new_capacity); + if (ret) + return ret; + if (new_capacity > SCHED_CAPACITY_SCALE) + return -EINVAL; + + mutex_lock(&cpu_scale_mutex); + for_each_cpu(i, &cpu_topology[this_cpu].core_sibling) + topology_set_cpu_scale(i, new_capacity); + mutex_unlock(&cpu_scale_mutex); + + return count; +} + +static DEVICE_ATTR_RW(cpu_capacity); + +static int register_cpu_capacity_sysctl(void) +{ + int i; + struct device *cpu; + + for_each_possible_cpu(i) { + cpu = get_cpu_device(i); + if (!cpu) { + pr_err("%s: too early to get CPU%d device!\n", + __func__, i); + continue; + } + device_create_file(cpu, &dev_attr_cpu_capacity); + } + + return 0; +} +subsys_initcall(register_cpu_capacity_sysctl); + +static u32 capacity_scale; +static u32 *raw_capacity; +static bool cap_parsing_failed; + +void topology_normalize_cpu_scale(void) +{ + u64 capacity; + int cpu; + + if (!raw_capacity || cap_parsing_failed) + return; + + pr_debug("cpu_capacity: 
capacity_scale=%u\n", capacity_scale); + mutex_lock(&cpu_scale_mutex); + for_each_possible_cpu(cpu) { + pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n", + cpu, raw_capacity[cpu]); + capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) + / capacity_scale; + topology_set_cpu_scale(cpu, capacity); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", + cpu, topology_get_cpu_scale(NULL, cpu)); + } + mutex_unlock(&cpu_scale_mutex); +} + +int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) +{ + int ret = 1; + u32 cpu_capacity; + + if (cap_parsing_failed) + return !ret; + + ret = of_property_read_u32(cpu_node, + "capacity-dmips-mhz", + &cpu_capacity); + if (!ret) { + if (!raw_capacity) { + raw_capacity = kcalloc(num_possible_cpus(), + sizeof(*raw_capacity), + GFP_KERNEL); + if (!raw_capacity) { + pr_err("cpu_capacity: failed to allocate memory for raw capacities\n"); + cap_parsing_failed = true; + return 0; + } + } + capacity_scale = max(cpu_capacity, capacity_scale); + raw_capacity[cpu] = cpu_capacity; + pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n", + cpu_node->full_name, raw_capacity[cpu]); + } else { + if (raw_capacity) { + pr_err("cpu_capacity: missing %s raw capacity\n", + cpu_node->full_name); + pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); + } + cap_parsing_failed = true; + kfree(raw_capacity); + } + + return !ret; +} + +#ifdef CONFIG_CPU_FREQ +static cpumask_var_t cpus_to_visit; +static bool cap_parsing_done; +static void parsing_done_workfn(struct work_struct *work); +static DECLARE_WORK(parsing_done_work, parsing_done_workfn); + +static int +init_cpu_capacity_callback(struct notifier_block *nb, + unsigned long val, + void *data) +{ + struct cpufreq_policy *policy = data; + int cpu; + + if (cap_parsing_failed || cap_parsing_done) + return 0; + + switch (val) { + case CPUFREQ_NOTIFY: + pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", + cpumask_pr_args(policy->related_cpus), + cpumask_pr_args(cpus_to_visit)); + cpumask_andnot(cpus_to_visit, + cpus_to_visit, + policy->related_cpus); + for_each_cpu(cpu, policy->related_cpus) { + raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) * + policy->cpuinfo.max_freq / 1000UL; + capacity_scale = max(raw_capacity[cpu], capacity_scale); + } + if (cpumask_empty(cpus_to_visit)) { + topology_normalize_cpu_scale(); + kfree(raw_capacity); + pr_debug("cpu_capacity: parsing done\n"); + cap_parsing_done = true; + schedule_work(&parsing_done_work); + } + } + return 0; +} + +static struct notifier_block init_cpu_capacity_notifier = { + .notifier_call = init_cpu_capacity_callback, +}; + +static int __init register_cpufreq_notifier(void) +{ + /* + * on ACPI-based systems we need to use the default cpu capacity + * until we have the necessary code to parse the cpu capacity, so + * skip registering cpufreq notifier. 
+ */ + if (!acpi_disabled || !raw_capacity) + return -EINVAL; + + if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { + pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n"); + return -ENOMEM; + } + + cpumask_copy(cpus_to_visit, cpu_possible_mask); + + return cpufreq_register_notifier(&init_cpu_capacity_notifier, + CPUFREQ_POLICY_NOTIFIER); +} +core_initcall(register_cpufreq_notifier); + +static void parsing_done_workfn(struct work_struct *work) +{ + cpufreq_unregister_notifier(&init_cpu_capacity_notifier, + CPUFREQ_POLICY_NOTIFIER); +} + +#else +static int __init free_raw_capacity(void) +{ + kfree(raw_capacity); + + return 0; +} +core_initcall(free_raw_capacity); +#endif diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 6470eb8088f4..e162c9a789ba 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -466,35 +466,6 @@ int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, } EXPORT_SYMBOL_GPL(bus_for_each_drv); -static int device_add_attrs(struct bus_type *bus, struct device *dev) -{ - int error = 0; - int i; - - if (!bus->dev_attrs) - return 0; - - for (i = 0; bus->dev_attrs[i].attr.name; i++) { - error = device_create_file(dev, &bus->dev_attrs[i]); - if (error) { - while (--i >= 0) - device_remove_file(dev, &bus->dev_attrs[i]); - break; - } - } - return error; -} - -static void device_remove_attrs(struct bus_type *bus, struct device *dev) -{ - int i; - - if (bus->dev_attrs) { - for (i = 0; bus->dev_attrs[i].attr.name; i++) - device_remove_file(dev, &bus->dev_attrs[i]); - } -} - /** * bus_add_device - add device to bus * @dev: device being added @@ -510,12 +481,9 @@ int bus_add_device(struct device *dev) if (bus) { pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev)); - error = device_add_attrs(bus, dev); - if (error) - goto out_put; error = device_add_groups(dev, bus->dev_groups); if (error) - goto out_id; + goto out_put; error = sysfs_create_link(&bus->p->devices_kset->kobj, &dev->kobj, dev_name(dev)); if (error) @@ -532,8 +500,6 @@ out_subsys: sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); out_groups: device_remove_groups(dev, bus->dev_groups); -out_id: - device_remove_attrs(bus, dev); out_put: bus_put(dev->bus); return error; @@ -590,7 +556,6 @@ void bus_remove_device(struct device *dev) sysfs_remove_link(&dev->kobj, "subsystem"); sysfs_remove_link(&dev->bus->p->devices_kset->kobj, dev_name(dev)); - device_remove_attrs(dev->bus, dev); device_remove_groups(dev, dev->bus->dev_groups); if (klist_node_attached(&dev->p->knode_bus)) klist_del(&dev->p->knode_bus); @@ -648,10 +613,7 @@ static void remove_probe_files(struct bus_type *bus) static ssize_t uevent_store(struct device_driver *drv, const char *buf, size_t count) { - enum kobject_action action; - - if (kobject_action_type(buf, count, &action) == 0) - kobject_uevent(&drv->p->kobj, action); + kobject_synth_uevent(&drv->p->kobj, buf, count); return count; } static DRIVER_ATTR_WO(uevent); @@ -868,10 +830,7 @@ static void klist_devices_put(struct klist_node *n) static ssize_t bus_uevent_store(struct bus_type *bus, const char *buf, size_t count) { - enum kobject_action action; - - if (kobject_action_type(buf, count, &action) == 0) - kobject_uevent(&bus->p->subsys.kobj, action); + kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); return count; } static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); diff --git a/drivers/base/class.c b/drivers/base/class.c index a2b2896693d6..52eb8e644acd 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ 
-119,36 +119,6 @@ static void class_put(struct class *cls) kset_put(&cls->p->subsys); } -static int add_class_attrs(struct class *cls) -{ - int i; - int error = 0; - - if (cls->class_attrs) { - for (i = 0; cls->class_attrs[i].attr.name; i++) { - error = class_create_file(cls, &cls->class_attrs[i]); - if (error) - goto error; - } - } -done: - return error; -error: - while (--i >= 0) - class_remove_file(cls, &cls->class_attrs[i]); - goto done; -} - -static void remove_class_attrs(struct class *cls) -{ - int i; - - if (cls->class_attrs) { - for (i = 0; cls->class_attrs[i].attr.name; i++) - class_remove_file(cls, &cls->class_attrs[i]); - } -} - static void klist_class_dev_get(struct klist_node *n) { struct device *dev = container_of(n, struct device, knode_class); @@ -217,8 +187,6 @@ int __class_register(struct class *cls, struct lock_class_key *key) } error = class_add_groups(class_get(cls), cls->class_groups); class_put(cls); - error = add_class_attrs(class_get(cls)); - class_put(cls); return error; } EXPORT_SYMBOL_GPL(__class_register); @@ -226,7 +194,6 @@ EXPORT_SYMBOL_GPL(__class_register); void class_unregister(struct class *cls) { pr_debug("device class '%s': unregistering\n", cls->name); - remove_class_attrs(cls); class_remove_groups(cls, cls->class_groups); kset_unregister(&cls->p->subsys); } diff --git a/drivers/base/core.c b/drivers/base/core.c index 6bb60fb6a30b..8dde934f8d15 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -981,12 +981,9 @@ out: static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - enum kobject_action action; + if (kobject_synth_uevent(&dev->kobj, buf, count)) + dev_err(dev, "uevent: failed to send synthetic uevent\n"); - if (kobject_action_type(buf, count, &action) == 0) - kobject_uevent(&dev->kobj, action); - else - dev_err(dev, "uevent: unknown action-string\n"); return count; } static DEVICE_ATTR_RW(uevent); @@ -1607,7 +1604,7 @@ int device_private_init(struct device *dev) */ int device_add(struct device *dev) { - struct device *parent = NULL; + struct device *parent; struct kobject *kobj; struct class_interface *class_intf; int error = -EINVAL; @@ -2884,3 +2881,19 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) else dev->fwnode = fwnode; } + +/** + * device_set_of_node_from_dev - reuse device-tree node of another device + * @dev: device whose device-tree node is being set + * @dev2: device whose device-tree node is being reused + * + * Takes another reference to the new device-tree node after first dropping + * any reference held to the old node. 
+ */ +void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) +{ + of_node_put(dev->of_node); + dev->of_node = of_node_get(dev2->of_node); + dev->of_node_reused = true; +} +EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a1fbf55c4d3a..4882f06d12df 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -19,6 +19,7 @@ #include <linux/device.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/wait.h> @@ -356,6 +357,10 @@ re_probe: if (ret) goto pinctrl_bind_failed; + ret = dma_configure(dev); + if (ret) + goto dma_failed; + if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", __func__, dev_name(dev)); @@ -417,6 +422,8 @@ re_probe: goto done; probe_failed: + dma_deconfigure(dev); +dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); @@ -826,6 +833,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); + dma_deconfigure(dev); + devres_release_all(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index b55804cac4c4..ea9726e71468 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -165,7 +165,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, { int ret; - ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma); + ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, + "reserved", res_cma); if (ret) return ret; @@ -258,7 +259,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem) return -EINVAL; } - err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma); + err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); if (err) { pr_err("Reserved memory: unable to setup CMA region\n"); return err; diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index efd71cf4fdea..9dbef4d1baa4 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -7,9 +7,11 @@ * This file is released under the GPLv2. 
*/ +#include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/export.h> #include <linux/gfp.h> +#include <linux/of_device.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -273,6 +275,24 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, EXPORT_SYMBOL(dma_common_mmap); #ifdef CONFIG_MMU +static struct vm_struct *__dma_common_pages_remap(struct page **pages, + size_t size, unsigned long vm_flags, pgprot_t prot, + const void *caller) +{ + struct vm_struct *area; + + area = get_vm_area_caller(size, vm_flags, caller); + if (!area) + return NULL; + + if (map_vm_area(area, prot, pages)) { + vunmap(area->addr); + return NULL; + } + + return area; +} + /* * remaps an array of PAGE_SIZE pages into another vm_area * Cannot be used in non-sleeping contexts @@ -283,17 +303,12 @@ void *dma_common_pages_remap(struct page **pages, size_t size, { struct vm_struct *area; - area = get_vm_area_caller(size, vm_flags, caller); + area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); if (!area) return NULL; area->pages = pages; - if (map_vm_area(area, prot, pages)) { - vunmap(area->addr); - return NULL; - } - return area->addr; } @@ -308,21 +323,22 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, { int i; struct page **pages; - void *ptr; - unsigned long pfn; + struct vm_struct *area; pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); if (!pages) return NULL; - for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++) - pages[i] = pfn_to_page(pfn + i); + for (i = 0; i < (size >> PAGE_SHIFT); i++) + pages[i] = nth_page(page, i); - ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller); + area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); kfree(pages); - return ptr; + if (!area) + return NULL; + return area->addr; } /* @@ -341,3 +357,42 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) vunmap(cpu_addr); } #endif + +/* + * Common configuration to enable DMA API use for a device + */ +#include <linux/pci.h> + +int dma_configure(struct device *dev) +{ + struct device *bridge = NULL, *dma_dev = dev; + enum dev_dma_attr attr; + int ret = 0; + + if (dev_is_pci(dev)) { + bridge = pci_get_host_bridge_device(to_pci_dev(dev)); + dma_dev = bridge; + if (IS_ENABLED(CONFIG_OF) && dma_dev->parent && + dma_dev->parent->of_node) + dma_dev = dma_dev->parent; + } + + if (dma_dev->of_node) { + ret = of_dma_configure(dev, dma_dev->of_node); + } else if (has_acpi_companion(dma_dev)) { + attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode)); + if (attr != DEV_DMA_NOT_SUPPORTED) + ret = acpi_dma_configure(dev, attr); + } + + if (bridge) + pci_put_host_bridge_device(bridge); + + return ret; +} + +void dma_deconfigure(struct device *dev) +{ + of_dma_deconfigure(dev); + acpi_dma_deconfigure(dev); +} diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index ac350c518e0c..b9f907eedbf7 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -260,6 +260,38 @@ static int fw_cache_piggyback_on_request(const char *name); * guarding for corner cases a global lock should be OK */ static DEFINE_MUTEX(fw_lock); +static bool __enable_firmware = false; + +static void enable_firmware(void) +{ + mutex_lock(&fw_lock); + __enable_firmware = true; + mutex_unlock(&fw_lock); +} + +static void disable_firmware(void) +{ + mutex_lock(&fw_lock); + __enable_firmware = false; + mutex_unlock(&fw_lock); +} + +/* + * When disabled only the 
built-in firmware and the firmware cache will be + * used to look for firmware. + */ +static bool firmware_enabled(void) +{ + bool enabled = false; + + mutex_lock(&fw_lock); + if (__enable_firmware) + enabled = true; + mutex_unlock(&fw_lock); + + return enabled; +} + static struct firmware_cache fw_cache; static struct firmware_buf *__allocate_fw_buf(const char *fw_name, @@ -523,6 +555,44 @@ static int fw_add_devm_name(struct device *dev, const char *name) } #endif +static int assign_firmware_buf(struct firmware *fw, struct device *device, + unsigned int opt_flags) +{ + struct firmware_buf *buf = fw->priv; + + mutex_lock(&fw_lock); + if (!buf->size || fw_state_is_aborted(&buf->fw_st)) { + mutex_unlock(&fw_lock); + return -ENOENT; + } + + /* + * add firmware name into devres list so that we can auto cache + * and uncache firmware for device. + * + * device may has been deleted already, but the problem + * should be fixed in devres or driver core. + */ + /* don't cache firmware handled without uevent */ + if (device && (opt_flags & FW_OPT_UEVENT) && + !(opt_flags & FW_OPT_NOCACHE)) + fw_add_devm_name(device, buf->fw_id); + + /* + * After caching firmware image is started, let it piggyback + * on request firmware. + */ + if (!(opt_flags & FW_OPT_NOCACHE) && + buf->fwc->state == FW_LOADER_START_CACHE) { + if (fw_cache_piggyback_on_request(buf->fw_id)) + kref_get(&buf->ref); + } + + /* pass the pages buffer to driver at the last minute */ + fw_set_page_data(buf, fw); + mutex_unlock(&fw_lock); + return 0; +} /* * user-mode helper code @@ -562,23 +632,19 @@ static void fw_load_abort(struct firmware_priv *fw_priv) static LIST_HEAD(pending_fw_head); -/* reboot notifier for avoid deadlock with usermode_lock */ -static int fw_shutdown_notify(struct notifier_block *unused1, - unsigned long unused2, void *unused3) +static void kill_pending_fw_fallback_reqs(bool only_kill_custom) { + struct firmware_buf *buf; + struct firmware_buf *next; + mutex_lock(&fw_lock); - while (!list_empty(&pending_fw_head)) - __fw_load_abort(list_first_entry(&pending_fw_head, - struct firmware_buf, - pending_list)); + list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) { + if (!buf->need_uevent || !only_kill_custom) + __fw_load_abort(buf); + } mutex_unlock(&fw_lock); - return NOTIFY_DONE; } -static struct notifier_block fw_shutdown_nb = { - .notifier_call = fw_shutdown_notify, -}; - static ssize_t timeout_show(struct class *class, struct class_attribute *attr, char *buf) { @@ -1036,46 +1102,56 @@ err_put_dev: static int fw_load_from_user_helper(struct firmware *firmware, const char *name, struct device *device, - unsigned int opt_flags, long timeout) + unsigned int opt_flags) { struct firmware_priv *fw_priv; + long timeout; + int ret; + + timeout = firmware_loading_timeout(); + if (opt_flags & FW_OPT_NOWAIT) { + timeout = usermodehelper_read_lock_wait(timeout); + if (!timeout) { + dev_dbg(device, "firmware: %s loading timed out\n", + name); + return -EBUSY; + } + } else { + ret = usermodehelper_read_trylock(); + if (WARN_ON(ret)) { + dev_err(device, "firmware: %s will not be loaded\n", + name); + return ret; + } + } fw_priv = fw_create_instance(firmware, name, device, opt_flags); - if (IS_ERR(fw_priv)) - return PTR_ERR(fw_priv); + if (IS_ERR(fw_priv)) { + ret = PTR_ERR(fw_priv); + goto out_unlock; + } fw_priv->buf = firmware->priv; - return _request_firmware_load(fw_priv, opt_flags, timeout); -} + ret = _request_firmware_load(fw_priv, opt_flags, timeout); -#ifdef CONFIG_PM_SLEEP -/* kill pending requests 
without uevent to avoid blocking suspend */ -static void kill_requests_without_uevent(void) -{ - struct firmware_buf *buf; - struct firmware_buf *next; + if (!ret) + ret = assign_firmware_buf(firmware, device, opt_flags); - mutex_lock(&fw_lock); - list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) { - if (!buf->need_uevent) - __fw_load_abort(buf); - } - mutex_unlock(&fw_lock); +out_unlock: + usermodehelper_read_unlock(); + + return ret; } -#endif #else /* CONFIG_FW_LOADER_USER_HELPER */ static inline int fw_load_from_user_helper(struct firmware *firmware, const char *name, - struct device *device, unsigned int opt_flags, - long timeout) + struct device *device, unsigned int opt_flags) { return -ENOENT; } -#ifdef CONFIG_PM_SLEEP -static inline void kill_requests_without_uevent(void) { } -#endif +static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { } #endif /* CONFIG_FW_LOADER_USER_HELPER */ @@ -1124,45 +1200,6 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, return 1; /* need to load */ } -static int assign_firmware_buf(struct firmware *fw, struct device *device, - unsigned int opt_flags) -{ - struct firmware_buf *buf = fw->priv; - - mutex_lock(&fw_lock); - if (!buf->size || fw_state_is_aborted(&buf->fw_st)) { - mutex_unlock(&fw_lock); - return -ENOENT; - } - - /* - * add firmware name into devres list so that we can auto cache - * and uncache firmware for device. - * - * device may has been deleted already, but the problem - * should be fixed in devres or driver core. - */ - /* don't cache firmware handled without uevent */ - if (device && (opt_flags & FW_OPT_UEVENT) && - !(opt_flags & FW_OPT_NOCACHE)) - fw_add_devm_name(device, buf->fw_id); - - /* - * After caching firmware image is started, let it piggyback - * on request firmware. 
- */ - if (!(opt_flags & FW_OPT_NOCACHE) && - buf->fwc->state == FW_LOADER_START_CACHE) { - if (fw_cache_piggyback_on_request(buf->fw_id)) - kref_get(&buf->ref); - } - - /* pass the pages buffer to driver at the last minute */ - fw_set_page_data(buf, fw); - mutex_unlock(&fw_lock); - return 0; -} - /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -1170,7 +1207,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name, unsigned int opt_flags) { struct firmware *fw = NULL; - long timeout; int ret; if (!firmware_p) @@ -1185,23 +1221,10 @@ _request_firmware(const struct firmware **firmware_p, const char *name, if (ret <= 0) /* error or already assigned */ goto out; - ret = 0; - timeout = firmware_loading_timeout(); - if (opt_flags & FW_OPT_NOWAIT) { - timeout = usermodehelper_read_lock_wait(timeout); - if (!timeout) { - dev_dbg(device, "firmware: %s loading timed out\n", - name); - ret = -EBUSY; - goto out; - } - } else { - ret = usermodehelper_read_trylock(); - if (WARN_ON(ret)) { - dev_err(device, "firmware: %s will not be loaded\n", - name); - goto out; - } + if (!firmware_enabled()) { + WARN(1, "firmware request while host is not available\n"); + ret = -EHOSTDOWN; + goto out; } ret = fw_get_filesystem_firmware(device, fw->priv); @@ -1213,15 +1236,11 @@ _request_firmware(const struct firmware **firmware_p, const char *name, if (opt_flags & FW_OPT_USERHELPER) { dev_warn(device, "Falling back to user helper\n"); ret = fw_load_from_user_helper(fw, name, device, - opt_flags, timeout); + opt_flags); } - } - - if (!ret) + } else ret = assign_firmware_buf(fw, device, opt_flags); - usermodehelper_read_unlock(); - out: if (ret < 0) { release_firmware(fw); @@ -1717,6 +1736,62 @@ static void device_uncache_fw_images_delay(unsigned long delay) msecs_to_jiffies(delay)); } +/** + * fw_pm_notify - notifier for suspend/resume + * @notify_block: unused + * @mode: mode we are switching to + * @unused: unused + * + * Used to modify the firmware_class state as we move in between states. + * The firmware_class implements a firmware cache to enable device driver + * to fetch firmware upon resume before the root filesystem is ready. We + * disable API calls which do not use the built-in firmware or the firmware + * cache when we know these calls will not work. + * + * The inner logic behind all this is a bit complex so it is worth summarizing + * the kernel's own suspend/resume process with context and focus on how this + * can impact the firmware API. + * + * First a review on how we go to suspend:: + * + * pm_suspend() --> enter_state() --> + * sys_sync() + * suspend_prepare() --> + * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...); + * suspend_freeze_processes() --> + * freeze_processes() --> + * __usermodehelper_set_disable_depth(UMH_DISABLED); + * freeze all tasks ... 
+ * freeze_kernel_threads() + * suspend_devices_and_enter() --> + * dpm_suspend_start() --> + * dpm_prepare() + * dpm_suspend() + * suspend_enter() --> + * platform_suspend_prepare() + * dpm_suspend_late() + * freeze_enter() + * syscore_suspend() + * + * When we resume we bail out of a loop from suspend_devices_and_enter() and + * unwind back out to the caller enter_state() where we were before as follows:: + * + * enter_state() --> + * suspend_devices_and_enter() --> (bail from loop) + * dpm_resume_end() --> + * dpm_resume() + * dpm_complete() + * suspend_finish() --> + * suspend_thaw_processes() --> + * thaw_processes() --> + * __usermodehelper_set_disable_depth(UMH_FREEZING); + * thaw_workqueues(); + * thaw all processes ... + * usermodehelper_enable(); + * pm_notifier_call_chain(PM_POST_SUSPEND); + * + * fw_pm_notify() works through pm_notifier_call_chain(). + */ static int fw_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused) { @@ -1724,8 +1799,13 @@ static int fw_pm_notify(struct notifier_block *notify_block, case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: case PM_RESTORE_PREPARE: - kill_requests_without_uevent(); + /* + * kill pending fallback requests with a custom fallback + * to avoid stalling suspend. + */ + kill_pending_fw_fallback_reqs(true); device_cache_fw_images(); + disable_firmware(); break; case PM_POST_SUSPEND: @@ -1738,6 +1818,7 @@ static int fw_pm_notify(struct notifier_block *notify_block, mutex_lock(&fw_lock); fw_cache.state = FW_LOADER_NO_CACHE; mutex_unlock(&fw_lock); + enable_firmware(); device_uncache_fw_images_delay(10 * MSEC_PER_SEC); break; @@ -1783,11 +1864,29 @@ static void __init fw_cache_init(void) #endif } +static int fw_shutdown_notify(struct notifier_block *unused1, + unsigned long unused2, void *unused3) +{ + disable_firmware(); + /* + * Kill all pending fallback requests to avoid both stalling shutdown, + * and avoid a deadlock with the usermode_lock. 
+ */ + kill_pending_fw_fallback_reqs(false); + + return NOTIFY_DONE; +} + +static struct notifier_block fw_shutdown_nb = { + .notifier_call = fw_shutdown_notify, +}; + static int __init firmware_class_init(void) { + enable_firmware(); fw_cache_init(); -#ifdef CONFIG_FW_LOADER_USER_HELPER register_reboot_notifier(&fw_shutdown_nb); +#ifdef CONFIG_FW_LOADER_USER_HELPER return class_register(&firmware_class); #else return 0; @@ -1796,12 +1895,13 @@ static int __init firmware_class_init(void) static void __exit firmware_class_exit(void) { + disable_firmware(); #ifdef CONFIG_PM_SLEEP unregister_syscore_ops(&fw_syscore_ops); unregister_pm_notifier(&fw_cache.pm_notify); #endif -#ifdef CONFIG_FW_LOADER_USER_HELPER unregister_reboot_notifier(&fw_shutdown_nb); +#ifdef CONFIG_FW_LOADER_USER_HELPER class_unregister(&firmware_class); #endif } diff --git a/drivers/base/node.c b/drivers/base/node.c index 5548f9686016..0440d95c9b5b 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -377,7 +377,7 @@ static int __ref get_nid_for_pfn(unsigned long pfn) if (!pfn_valid_within(pfn)) return -1; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT - if (system_state == SYSTEM_BOOTING) + if (system_state < SYSTEM_RUNNING) return early_pfn_to_nid(pfn); #endif page = pfn_to_page(pfn); diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c index 5917b4b5fb99..eb929dd6ef1e 100644 --- a/drivers/base/pinctrl.c +++ b/drivers/base/pinctrl.c @@ -23,6 +23,9 @@ int pinctrl_bind_pins(struct device *dev) { int ret; + if (dev->of_node_reused) + return 0; + dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL); if (!dev->pins) return -ENOMEM; diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 0fc7c4da7756..e5473525e7b2 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -195,7 +195,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, domain = msi_create_irq_domain(fwnode, info, parent); if (domain) - domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; + irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI); return domain; } @@ -345,8 +345,7 @@ platform_msi_create_device_domain(struct device *dev, data->host_data = host_data; domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec, - of_node_to_fwnode(dev->of_node), - ops, data); + dev->fwnode, ops, data); if (!domain) goto free_priv; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index c2456839214a..97332d094fe2 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -847,7 +847,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, struct platform_device *pdev = to_platform_device(dev); int len; - len = of_device_get_modalias(dev, buf, PAGE_SIZE -1); + len = of_device_modalias(dev, buf, PAGE_SIZE); if (len != -ENODEV) return len; @@ -866,7 +866,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; if (count > PATH_MAX) return -EINVAL; @@ -879,12 +879,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -895,8 +898,12 @@ static ssize_t driver_override_show(struct device *dev, struct 
device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); + ssize_t len; - return sprintf(buf, "%s\n", pdev->driver_override); + device_lock(dev); + len = sprintf(buf, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 2ac906288a6f..7f374233b498 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1690,8 +1690,6 @@ static struct generic_pm_domain *genpd_xlate_simple( struct of_phandle_args *genpdspec, void *data) { - if (genpdspec->args_count != 0) - return ERR_PTR(-EINVAL); return data; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9faee1c893e5..c99f8730de82 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -62,7 +62,7 @@ static pm_message_t pm_transition; static int async_error; -static char *pm_verb(int event) +static const char *pm_verb(int event) { switch (event) { case PM_EVENT_SUSPEND: @@ -208,7 +208,8 @@ static ktime_t initcall_debug_start(struct device *dev) } static void initcall_debug_report(struct device *dev, ktime_t calltime, - int error, pm_message_t state, char *info) + int error, pm_message_t state, + const char *info) { ktime_t rettime; s64 nsecs; @@ -403,21 +404,23 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat return NULL; } -static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) +static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) { dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? ", may wakeup" : ""); } -static void pm_dev_err(struct device *dev, pm_message_t state, char *info, +static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, int error) { printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", dev_name(dev), pm_verb(state.event), info, error); } -static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) +#ifdef CONFIG_PM_DEBUG +static void dpm_show_time(ktime_t starttime, pm_message_t state, + const char *info) { ktime_t calltime; u64 usecs64; @@ -433,9 +436,13 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) info ?: "", info ? 
" " : "", pm_verb(state.event), usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } +#else +static inline void dpm_show_time(ktime_t starttime, pm_message_t state, + const char *info) {} +#endif /* CONFIG_PM_DEBUG */ static int dpm_run_callback(pm_callback_t cb, struct device *dev, - pm_message_t state, char *info) + pm_message_t state, const char *info) { ktime_t calltime; int error; @@ -535,7 +542,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd) static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; - char *info = NULL; + const char *info = NULL; int error = 0; TRACE_DEVICE(dev); @@ -665,7 +672,7 @@ void dpm_resume_noirq(pm_message_t state) static int device_resume_early(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; - char *info = NULL; + const char *info = NULL; int error = 0; TRACE_DEVICE(dev); @@ -793,7 +800,7 @@ EXPORT_SYMBOL_GPL(dpm_resume_start); static int device_resume(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; - char *info = NULL; + const char *info = NULL; int error = 0; DECLARE_DPM_WATCHDOG_ON_STACK(wd); @@ -955,7 +962,7 @@ void dpm_resume(pm_message_t state) static void device_complete(struct device *dev, pm_message_t state) { void (*callback)(struct device *) = NULL; - char *info = NULL; + const char *info = NULL; if (dev->power.syscore) return; @@ -1080,7 +1087,7 @@ static pm_message_t resume_event(pm_message_t sleep_state) static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; - char *info = NULL; + const char *info = NULL; int error = 0; TRACE_DEVICE(dev); @@ -1091,11 +1098,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; - if (pm_wakeup_pending()) { - async_error = -EBUSY; - goto Complete; - } - if (dev->power.syscore || dev->power.direct_complete) goto Complete; @@ -1225,7 +1227,7 @@ int dpm_suspend_noirq(pm_message_t state) static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; - char *info = NULL; + const char *info = NULL; int error = 0; TRACE_DEVICE(dev); @@ -1384,7 +1386,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end); */ static int legacy_suspend(struct device *dev, pm_message_t state, int (*cb)(struct device *dev, pm_message_t state), - char *info) + const char *info) { int error; ktime_t calltime; @@ -1426,7 +1428,7 @@ static void dpm_clear_suppliers_direct_complete(struct device *dev) static int __device_suspend(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; - char *info = NULL; + const char *info = NULL; int error = 0; DECLARE_DPM_WATCHDOG_ON_STACK(wd); diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index dae61720b314..a8cc14fd8ae4 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -180,7 +180,7 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) { struct opp_table *opp_table; struct dev_pm_opp *opp; - struct regulator *reg, **regulators; + struct regulator *reg; unsigned long latency_ns = 0; int ret, i, count; struct { @@ -198,15 +198,9 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) if (!count) goto put_opp_table; - regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL); - if (!regulators) - goto put_opp_table; - uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) - goto 
free_regulators; - - memcpy(regulators, opp_table->regulators, count * sizeof(*regulators)); + goto put_opp_table; mutex_lock(&opp_table->lock); @@ -232,15 +226,13 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) * isn't freed, while we are executing this routine. */ for (i = 0; i < count; i++) { - reg = regulators[i]; + reg = opp_table->regulators[i]; ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); if (ret > 0) latency_ns += ret * 1000; } kfree(uV); -free_regulators: - kfree(regulators); put_opp_table: dev_pm_opp_put_opp_table(opp_table); @@ -543,17 +535,18 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk, return ret; } -static int _generic_set_opp(struct dev_pm_set_opp_data *data) +static int _generic_set_opp_regulator(const struct opp_table *opp_table, + struct device *dev, + unsigned long old_freq, + unsigned long freq, + struct dev_pm_opp_supply *old_supply, + struct dev_pm_opp_supply *new_supply) { - struct dev_pm_opp_supply *old_supply = data->old_opp.supplies; - struct dev_pm_opp_supply *new_supply = data->new_opp.supplies; - unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate; - struct regulator *reg = data->regulators[0]; - struct device *dev= data->dev; + struct regulator *reg = opp_table->regulators[0]; int ret; /* This function only supports single regulator per device */ - if (WARN_ON(data->regulator_count > 1)) { + if (WARN_ON(opp_table->regulator_count > 1)) { dev_err(dev, "multiple regulators are not supported\n"); return -EINVAL; } @@ -566,7 +559,7 @@ static int _generic_set_opp(struct dev_pm_set_opp_data *data) } /* Change frequency */ - ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq); + ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq); if (ret) goto restore_voltage; @@ -580,12 +573,12 @@ static int _generic_set_opp(struct dev_pm_set_opp_data *data) return 0; restore_freq: - if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq)) + if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq)) dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", __func__, old_freq); restore_voltage: /* This shouldn't harm even if the voltages weren't updated earlier */ - if (old_supply->u_volt) + if (old_supply) _set_opp_voltage(dev, reg, old_supply); return ret; @@ -603,10 +596,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { struct opp_table *opp_table; unsigned long freq, old_freq; - int (*set_opp)(struct dev_pm_set_opp_data *data); struct dev_pm_opp *old_opp, *opp; - struct regulator **regulators; - struct dev_pm_set_opp_data *data; struct clk *clk; int ret, size; @@ -661,38 +651,35 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, old_freq, freq); - regulators = opp_table->regulators; - /* Only frequency scaling */ - if (!regulators) { + if (!opp_table->regulators) { ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); - goto put_opps; - } + } else if (!opp_table->set_opp) { + ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq, + IS_ERR(old_opp) ? 
NULL : old_opp->supplies, + opp->supplies); + } else { + struct dev_pm_set_opp_data *data; - if (opp_table->set_opp) - set_opp = opp_table->set_opp; - else - set_opp = _generic_set_opp; - - data = opp_table->set_opp_data; - data->regulators = regulators; - data->regulator_count = opp_table->regulator_count; - data->clk = clk; - data->dev = dev; - - data->old_opp.rate = old_freq; - size = sizeof(*opp->supplies) * opp_table->regulator_count; - if (IS_ERR(old_opp)) - memset(data->old_opp.supplies, 0, size); - else - memcpy(data->old_opp.supplies, old_opp->supplies, size); + data = opp_table->set_opp_data; + data->regulators = opp_table->regulators; + data->regulator_count = opp_table->regulator_count; + data->clk = clk; + data->dev = dev; - data->new_opp.rate = freq; - memcpy(data->new_opp.supplies, opp->supplies, size); + data->old_opp.rate = old_freq; + size = sizeof(*opp->supplies) * opp_table->regulator_count; + if (IS_ERR(old_opp)) + memset(data->old_opp.supplies, 0, size); + else + memcpy(data->old_opp.supplies, old_opp->supplies, size); - ret = set_opp(data); + data->new_opp.rate = freq; + memcpy(data->new_opp.supplies, opp->supplies, size); + + ret = opp_table->set_opp(data); + } -put_opps: dev_pm_opp_put(opp); put_old_opp: if (!IS_ERR(old_opp)) @@ -1376,6 +1363,73 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); /** + * dev_pm_opp_set_clkname() - Set clk name for the device + * @dev: Device for which clk name is being set. + * @name: Clk name. + * + * In order to support OPP switching, OPP layer needs to get pointer to the + * clock for the device. Simple cases work fine without using this routine (i.e. + * by passing connection-id as NULL), but for a device with multiple clocks + * available, the OPP core needs to know the exact name of the clk to use. + * + * This must be called before any OPPs are initialized for the device. + */ +struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) +{ + struct opp_table *opp_table; + int ret; + + opp_table = dev_pm_opp_get_opp_table(dev); + if (!opp_table) + return ERR_PTR(-ENOMEM); + + /* This should be called before OPPs are initialized */ + if (WARN_ON(!list_empty(&opp_table->opp_list))) { + ret = -EBUSY; + goto err; + } + + /* Already have default clk set, free it */ + if (!IS_ERR(opp_table->clk)) + clk_put(opp_table->clk); + + /* Find clk for the device */ + opp_table->clk = clk_get(dev, name); + if (IS_ERR(opp_table->clk)) { + ret = PTR_ERR(opp_table->clk); + if (ret != -EPROBE_DEFER) { + dev_err(dev, "%s: Couldn't find clock: %d\n", __func__, + ret); + } + goto err; + } + + return opp_table; + +err: + dev_pm_opp_put_opp_table(opp_table); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname); + +/** + * dev_pm_opp_put_clkname() - Releases resources blocked for clk. + * @opp_table: OPP table returned from dev_pm_opp_set_clkname(). + */ +void dev_pm_opp_put_clkname(struct opp_table *opp_table) +{ + /* Make sure there are no concurrent readers while updating opp_table */ + WARN_ON(!list_empty(&opp_table->opp_list)); + + clk_put(opp_table->clk); + opp_table->clk = ERR_PTR(-EINVAL); + + dev_pm_opp_put_opp_table(opp_table); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname); + +/** * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper * @dev: Device for which the helper is getting registered. * @set_opp: Custom set OPP helper. 
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c index 95f433db4ac7..81cf120fcf43 100644 --- a/drivers/base/power/opp/debugfs.c +++ b/drivers/base/power/opp/debugfs.c @@ -40,11 +40,10 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp, struct dentry *pdentry) { struct dentry *d; - int i = 0; + int i; char *name; - /* Always create at least supply-0 directory */ - do { + for (i = 0; i < opp_table->regulator_count; i++) { name = kasprintf(GFP_KERNEL, "supply-%d", i); /* Create per-opp directory */ @@ -70,7 +69,7 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp, if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->supplies[i].u_amp)) return false; - } while (++i < opp_table->regulator_count); + } return true; } diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 779428676f63..57eec1ca0569 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -131,8 +131,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, prop = of_find_property(opp->np, name, NULL); /* Missing property isn't a problem, but an invalid entry is */ - if (!prop) - return 0; + if (!prop) { + if (!opp_table->regulator_count) + return 0; + + dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n", + __func__); + return -EINVAL; + } } vcount = of_property_count_u32_elems(opp->np, name); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 33b4b902741a..185a52581cfa 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -607,7 +607,7 @@ static struct attribute *power_attrs[] = { #endif /* CONFIG_PM_ADVANCED_DEBUG */ NULL, }; -static struct attribute_group pm_attr_group = { +static const struct attribute_group pm_attr_group = { .name = power_group_name, .attrs = power_attrs, }; @@ -629,7 +629,7 @@ static struct attribute *wakeup_attrs[] = { #endif NULL, }; -static struct attribute_group pm_wakeup_attr_group = { +static const struct attribute_group pm_wakeup_attr_group = { .name = power_group_name, .attrs = wakeup_attrs, }; @@ -644,7 +644,7 @@ static struct attribute *runtime_attrs[] = { &dev_attr_autosuspend_delay_ms.attr, NULL, }; -static struct attribute_group pm_runtime_attr_group = { +static const struct attribute_group pm_runtime_attr_group = { .name = power_group_name, .attrs = runtime_attrs, }; @@ -653,7 +653,7 @@ static struct attribute *pm_qos_resume_latency_attrs[] = { &dev_attr_pm_qos_resume_latency_us.attr, NULL, }; -static struct attribute_group pm_qos_resume_latency_attr_group = { +static const struct attribute_group pm_qos_resume_latency_attr_group = { .name = power_group_name, .attrs = pm_qos_resume_latency_attrs, }; @@ -662,7 +662,7 @@ static struct attribute *pm_qos_latency_tolerance_attrs[] = { &dev_attr_pm_qos_latency_tolerance_us.attr, NULL, }; -static struct attribute_group pm_qos_latency_tolerance_attr_group = { +static const struct attribute_group pm_qos_latency_tolerance_attr_group = { .name = power_group_name, .attrs = pm_qos_latency_tolerance_attrs, }; @@ -672,7 +672,7 @@ static struct attribute *pm_qos_flags_attrs[] = { &dev_attr_pm_qos_remote_wakeup.attr, NULL, }; -static struct attribute_group pm_qos_flags_attr_group = { +static const struct attribute_group pm_qos_flags_attr_group = { .name = power_group_name, .attrs = pm_qos_flags_attrs, }; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 136854970489..144e6d8fafc8 100644 --- a/drivers/base/power/wakeup.c +++ 
b/drivers/base/power/wakeup.c @@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ unsigned int pm_wakeup_irq __read_mostly; -/* If set and the system is suspending, terminate the suspend. */ -static bool pm_abort_suspend __read_mostly; +/* If greater than 0 and the system is suspending, terminate the suspend. */ +static atomic_t pm_abort_suspend __read_mostly; /* * Combined counters of registered wakeup events and wakeup events in progress. @@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); +DEFINE_STATIC_SRCU(wakeup_srcu); + static struct wakeup_source deleted_ws = { .name = "deleted", .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), @@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws) spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); spin_unlock_irqrestore(&events_lock, flags); - synchronize_rcu(); + synchronize_srcu(&wakeup_srcu); } EXPORT_SYMBOL_GPL(wakeup_source_remove); @@ -332,12 +334,12 @@ void device_wakeup_detach_irq(struct device *dev) void device_wakeup_arm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) dev_pm_arm_wake_irq(ws->wakeirq); - - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -348,12 +350,12 @@ void device_wakeup_arm_wake_irqs(void) void device_wakeup_disarm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) dev_pm_disarm_wake_irq(ws->wakeirq); - - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -525,12 +527,6 @@ static void wakeup_source_activate(struct wakeup_source *ws) "unregistered wakeup source\n")) return; - /* - * active wakeup source should bring the system - * out of PM_SUSPEND_FREEZE state - */ - freeze_wake(); - ws->active = true; ws->active_count++; ws->last_time = ktime_get(); @@ -546,8 +542,9 @@ static void wakeup_source_activate(struct wakeup_source *ws) /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. */ -static void wakeup_source_report_event(struct wakeup_source *ws) +static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) { ws->event_count++; /* This is racy, but the counter is approximate anyway. */ @@ -556,6 +553,9 @@ static void wakeup_source_report_event(struct wakeup_source *ws) if (!ws->active) wakeup_source_activate(ws); + + if (hard) + pm_system_wakeup(); } /** @@ -573,7 +573,7 @@ void __pm_stay_awake(struct wakeup_source *ws) spin_lock_irqsave(&ws->lock, flags); - wakeup_source_report_event(ws); + wakeup_source_report_event(ws, false); del_timer(&ws->timer); ws->timer_expires = 0; @@ -739,9 +739,10 @@ static void pm_wakeup_timer_fn(unsigned long data) } /** - * __pm_wakeup_event - Notify the PM core of a wakeup event. + * pm_wakeup_ws_event - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the event source. * @msec: Anticipated event processing time (in milliseconds). + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Notify the PM core of a wakeup event whose source is @ws that will take * approximately @msec milliseconds to be processed by the kernel. 
If @ws is @@ -750,7 +751,7 @@ static void pm_wakeup_timer_fn(unsigned long data) * * It is safe to call this function from interrupt context. */ -void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) { unsigned long flags; unsigned long expires; @@ -760,7 +761,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) spin_lock_irqsave(&ws->lock, flags); - wakeup_source_report_event(ws); + wakeup_source_report_event(ws, hard); if (!msec) { wakeup_source_deactivate(ws); @@ -779,17 +780,17 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) unlock: spin_unlock_irqrestore(&ws->lock, flags); } -EXPORT_SYMBOL_GPL(__pm_wakeup_event); - +EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** * pm_wakeup_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * - * Call __pm_wakeup_event() for the @dev's wakeup source object. + * Call pm_wakeup_ws_event() for the @dev's wakeup source object. */ -void pm_wakeup_event(struct device *dev, unsigned int msec) +void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) { unsigned long flags; @@ -797,18 +798,18 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) return; spin_lock_irqsave(&dev->power.lock, flags); - __pm_wakeup_event(dev->power.wakeup, msec); + pm_wakeup_ws_event(dev->power.wakeup, msec, hard); spin_unlock_irqrestore(&dev->power.lock, flags); } -EXPORT_SYMBOL_GPL(pm_wakeup_event); +EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; - int active = 0; + int srcuidx, active = 0; struct wakeup_source *last_activity_ws = NULL; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_debug("active wakeup source: %s\n", ws->name); @@ -824,7 +825,7 @@ void pm_print_active_wakeup_sources(void) if (!active && last_activity_ws) pr_debug("last active wakeup source: %s\n", last_activity_ws->name); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); @@ -856,20 +857,26 @@ bool pm_wakeup_pending(void) pm_print_active_wakeup_sources(); } - return ret || pm_abort_suspend; + return ret || atomic_read(&pm_abort_suspend) > 0; } void pm_system_wakeup(void) { - pm_abort_suspend = true; + atomic_inc(&pm_abort_suspend); freeze_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); -void pm_wakeup_clear(void) +void pm_system_cancel_wakeup(void) +{ + atomic_dec(&pm_abort_suspend); +} + +void pm_wakeup_clear(bool reset) { - pm_abort_suspend = false; pm_wakeup_irq = 0; + if (reset) + atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) @@ -951,8 +958,9 @@ void pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { @@ -966,7 +974,7 @@ void pm_wakep_autosleep_enabled(bool set) } spin_unlock_irq(&ws->lock); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } #endif /* CONFIG_PM_AUTOSLEEP */ @@ -1027,15 +1035,16 @@ static int print_wakeup_source_stats(struct seq_file *m, static int 
wakeup_sources_stats_show(struct seq_file *m, void *unused) { struct wakeup_source *ws; + int srcuidx; seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) print_wakeup_source_stats(m, ws); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); print_wakeup_source_stats(m, &deleted_ws); diff --git a/drivers/base/property.c b/drivers/base/property.c index c458c63e353f..149de311a10e 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_graph.h> #include <linux/property.h> #include <linux/etherdevice.h> #include <linux/phy.h> @@ -146,47 +147,45 @@ static int pset_prop_read_string_array(struct property_set *pset, const char *propname, const char **strings, size_t nval) { + const struct property_entry *prop; const void *pointer; - size_t length = nval * sizeof(*strings); + size_t array_len, length; + + /* Find out the array length. */ + prop = pset_prop_get(pset, propname); + if (!prop) + return -EINVAL; + + if (!prop->is_array) + /* The array length for a non-array string property is 1. */ + array_len = 1; + else + /* Find the length of an array. */ + array_len = pset_prop_count_elems_of_size(pset, propname, + sizeof(const char *)); + + /* Return how many there are if strings is NULL. */ + if (!strings) + return array_len; + + array_len = min(nval, array_len); + length = array_len * sizeof(*strings); pointer = pset_prop_find(pset, propname, length); if (IS_ERR(pointer)) return PTR_ERR(pointer); memcpy(strings, pointer, length); - return 0; -} -static int pset_prop_read_string(struct property_set *pset, - const char *propname, const char **strings) -{ - const struct property_entry *prop; - const char * const *pointer; - - prop = pset_prop_get(pset, propname); - if (!prop) - return -EINVAL; - if (!prop->is_string) - return -EILSEQ; - if (prop->is_array) { - pointer = prop->pointer.str; - if (!pointer) - return -ENODATA; - } else { - pointer = &prop->value.str; - if (*pointer && strnlen(*pointer, prop->length) >= prop->length) - return -EILSEQ; - } - - *strings = *pointer; - return 0; + return array_len; } -static inline struct fwnode_handle *dev_fwnode(struct device *dev) +struct fwnode_handle *dev_fwnode(struct device *dev) { return IS_ENABLED(CONFIG_OF) && dev->of_node ? &dev->of_node->fwnode : dev->fwnode; } +EXPORT_SYMBOL_GPL(dev_fwnode); /** * device_property_present - check if a property of a device is present @@ -340,8 +339,8 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array); * Function reads an array of string properties with @propname from the device * firmware description and stores them to @val if found. * - * Return: number of values if @val was %NULL, - * %0 if the property was found (success), + * Return: number of values read on success if @val is non-NULL, + * number of values available on success if @val is NULL, * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property is not an array of strings, @@ -553,25 +552,8 @@ static int __fwnode_property_read_string_array(struct fwnode_handle *fwnode, return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, val, nval); else if (is_pset_node(fwnode)) - return val ? 
- pset_prop_read_string_array(to_pset_node(fwnode), - propname, val, nval) : - pset_prop_count_elems_of_size(to_pset_node(fwnode), - propname, - sizeof(const char *)); - return -ENXIO; -} - -static int __fwnode_property_read_string(struct fwnode_handle *fwnode, - const char *propname, const char **val) -{ - if (is_of_node(fwnode)) - return of_property_read_string(to_of_node(fwnode), propname, val); - else if (is_acpi_node(fwnode)) - return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, - val, 1); - else if (is_pset_node(fwnode)) - return pset_prop_read_string(to_pset_node(fwnode), propname, val); + return pset_prop_read_string_array(to_pset_node(fwnode), + propname, val, nval); return -ENXIO; } @@ -585,11 +567,11 @@ static int __fwnode_property_read_string(struct fwnode_handle *fwnode, * Read an string list property @propname from the given firmware node and store * them to @val if found. * - * Return: number of values if @val was %NULL, - * %0 if the property was found (success), + * Return: number of values read on success if @val is non-NULL, + * number of values available on success if @val is NULL, * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, - * %-EPROTO if the property is not an array of strings, + * %-EPROTO or %-EILSEQ if the property is not an array of strings, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ @@ -626,14 +608,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val) { - int ret; + int ret = fwnode_property_read_string_array(fwnode, propname, val, 1); - ret = __fwnode_property_read_string(fwnode, propname, val); - if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && - !IS_ERR_OR_NULL(fwnode->secondary)) - ret = __fwnode_property_read_string(fwnode->secondary, - propname, val); - return ret; + return ret < 0 ? ret : 0; } EXPORT_SYMBOL_GPL(fwnode_property_read_string); @@ -932,41 +909,109 @@ int device_add_properties(struct device *dev, EXPORT_SYMBOL_GPL(device_add_properties); /** - * device_get_next_child_node - Return the next child node handle for a device - * @dev: Device to find the next child node for. - * @child: Handle to one of the device's child nodes or a null handle. + * fwnode_get_next_parent - Iterate to the node's parent + * @fwnode: Firmware whose parent is retrieved + * + * This is like fwnode_get_parent() except that it drops the refcount + * on the passed node, making it suitable for iterating through a + * node's parents. + * + * Returns a node pointer with refcount incremented, use + * fwnode_handle_node() on it when done. */ -struct fwnode_handle *device_get_next_child_node(struct device *dev, +struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent = fwnode_get_parent(fwnode); + + fwnode_handle_put(fwnode); + + return parent; +} +EXPORT_SYMBOL_GPL(fwnode_get_next_parent); + +/** + * fwnode_get_parent - Return parent firwmare node + * @fwnode: Firmware whose parent is retrieved + * + * Return parent firmware node of the given node if possible or %NULL if no + * parent was available. 
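To illustrate the revised return convention of fwnode_property_read_string_array() documented above (count of available values for a NULL buffer, count of values actually read otherwise), here is a sketch of the usual query-then-read pattern; the "example-names" property name is made up.

    #include <linux/property.h>
    #include <linux/slab.h>

    static int example_read_names(struct fwnode_handle *fwnode)
    {
            const char **names;
            int count, ret;

            /* NULL buffer: ask how many strings the property holds. */
            count = fwnode_property_read_string_array(fwnode, "example-names",
                                                      NULL, 0);
            if (count < 0)
                    return count;

            names = kcalloc(count, sizeof(*names), GFP_KERNEL);
            if (!names)
                    return -ENOMEM;

            /* Non-NULL buffer: returns the number of strings actually read. */
            ret = fwnode_property_read_string_array(fwnode, "example-names",
                                                    names, count);
            /* ... use names[0..ret-1] here ... */

            kfree(names);
            return ret < 0 ? ret : 0;
    }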
+ */ +struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent = NULL; + + if (is_of_node(fwnode)) { + struct device_node *node; + + node = of_get_parent(to_of_node(fwnode)); + if (node) + parent = &node->fwnode; + } else if (is_acpi_node(fwnode)) { + parent = acpi_node_get_parent(fwnode); + } + + return parent; +} +EXPORT_SYMBOL_GPL(fwnode_get_parent); + +/** + * fwnode_get_next_child_node - Return the next child node handle for a node + * @fwnode: Firmware node to find the next child node for. + * @child: Handle to one of the node's child nodes or a %NULL handle. + */ +struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode, struct fwnode_handle *child) { - if (IS_ENABLED(CONFIG_OF) && dev->of_node) { + if (is_of_node(fwnode)) { struct device_node *node; - node = of_get_next_available_child(dev->of_node, to_of_node(child)); + node = of_get_next_available_child(to_of_node(fwnode), + to_of_node(child)); if (node) return &node->fwnode; - } else if (IS_ENABLED(CONFIG_ACPI)) { - return acpi_get_next_subnode(dev, child); + } else if (is_acpi_node(fwnode)) { + return acpi_get_next_subnode(fwnode, child); } + return NULL; } +EXPORT_SYMBOL_GPL(fwnode_get_next_child_node); + +/** + * device_get_next_child_node - Return the next child node handle for a device + * @dev: Device to find the next child node for. + * @child: Handle to one of the device's child nodes or a null handle. + */ +struct fwnode_handle *device_get_next_child_node(struct device *dev, + struct fwnode_handle *child) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + struct fwnode_handle *fwnode = NULL; + + if (dev->of_node) + fwnode = &dev->of_node->fwnode; + else if (adev) + fwnode = acpi_fwnode_handle(adev); + + return fwnode_get_next_child_node(fwnode, child); +} EXPORT_SYMBOL_GPL(device_get_next_child_node); /** - * device_get_named_child_node - Return first matching named child node handle - * @dev: Device to find the named child node for. + * fwnode_get_named_child_node - Return first matching named child node handle + * @fwnode: Firmware node to find the named child node for. * @childname: String to match child node name against. */ -struct fwnode_handle *device_get_named_child_node(struct device *dev, +struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode, const char *childname) { struct fwnode_handle *child; /* - * Find first matching named child node of this device. + * Find first matching named child node of this fwnode. * For ACPI this will be a data only sub-node. */ - device_for_each_child_node(dev, child) { + fwnode_for_each_child_node(fwnode, child) { if (is_of_node(child)) { if (!of_node_cmp(to_of_node(child)->name, childname)) return child; @@ -978,9 +1023,32 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev, return NULL; } +EXPORT_SYMBOL_GPL(fwnode_get_named_child_node); + +/** + * device_get_named_child_node - Return first matching named child node handle + * @dev: Device to find the named child node for. + * @childname: String to match child node name against. + */ +struct fwnode_handle *device_get_named_child_node(struct device *dev, + const char *childname) +{ + return fwnode_get_named_child_node(dev_fwnode(dev), childname); +} EXPORT_SYMBOL_GPL(device_get_named_child_node); /** + * fwnode_handle_get - Obtain a reference to a device node + * @fwnode: Pointer to the device node to obtain the reference to. 
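A short sketch of the fwnode-based child lookup added above: find a named child of a device's firmware node and drop the reference when done. The "sensor" node name and "reg" property are illustrative, not taken from the patch.

    #include <linux/device.h>
    #include <linux/property.h>

    static int example_parse_child(struct device *dev)
    {
            struct fwnode_handle *child;
            u32 reg = 0;

            child = fwnode_get_named_child_node(dev_fwnode(dev), "sensor");
            if (!child)
                    return -ENODEV;

            fwnode_property_read_u32(child, "reg", &reg);
            /* ... configure the sensor at address 'reg' ... */
            fwnode_handle_put(child);

            return 0;
    }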
+ */ +void fwnode_handle_get(struct fwnode_handle *fwnode) +{ + if (is_of_node(fwnode)) + of_node_get(to_of_node(fwnode)); +} +EXPORT_SYMBOL_GPL(fwnode_handle_get); + +/** * fwnode_handle_put - Drop reference to a device node * @fwnode: Pointer to the device node to drop the reference to. * @@ -1117,3 +1185,157 @@ void *device_get_mac_address(struct device *dev, char *addr, int alen) return device_get_mac_addr(dev, "address", addr, alen); } EXPORT_SYMBOL(device_get_mac_address); + +/** + * device_graph_get_next_endpoint - Get next endpoint firmware node + * @fwnode: Pointer to the parent firmware node + * @prev: Previous endpoint node or %NULL to get the first + * + * Returns an endpoint firmware node pointer or %NULL if no more endpoints + * are available. + */ +struct fwnode_handle * +fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, + struct fwnode_handle *prev) +{ + struct fwnode_handle *endpoint = NULL; + + if (is_of_node(fwnode)) { + struct device_node *node; + + node = of_graph_get_next_endpoint(to_of_node(fwnode), + to_of_node(prev)); + + if (node) + endpoint = &node->fwnode; + } else if (is_acpi_node(fwnode)) { + endpoint = acpi_graph_get_next_endpoint(fwnode, prev); + if (IS_ERR(endpoint)) + endpoint = NULL; + } + + return endpoint; + +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); + +/** + * fwnode_graph_get_remote_port_parent - Return fwnode of a remote device + * @fwnode: Endpoint firmware node pointing to the remote endpoint + * + * Extracts firmware node of a remote device the @fwnode points to. + */ +struct fwnode_handle * +fwnode_graph_get_remote_port_parent(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent = NULL; + + if (is_of_node(fwnode)) { + struct device_node *node; + + node = of_graph_get_remote_port_parent(to_of_node(fwnode)); + if (node) + parent = &node->fwnode; + } else if (is_acpi_node(fwnode)) { + int ret; + + ret = acpi_graph_get_remote_endpoint(fwnode, &parent, NULL, + NULL); + if (ret) + return NULL; + } + + return parent; +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent); + +/** + * fwnode_graph_get_remote_port - Return fwnode of a remote port + * @fwnode: Endpoint firmware node pointing to the remote endpoint + * + * Extracts firmware node of a remote port the @fwnode points to. + */ +struct fwnode_handle *fwnode_graph_get_remote_port(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *port = NULL; + + if (is_of_node(fwnode)) { + struct device_node *node; + + node = of_graph_get_remote_port(to_of_node(fwnode)); + if (node) + port = &node->fwnode; + } else if (is_acpi_node(fwnode)) { + int ret; + + ret = acpi_graph_get_remote_endpoint(fwnode, NULL, &port, NULL); + if (ret) + return NULL; + } + + return port; +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port); + +/** + * fwnode_graph_get_remote_endpoint - Return fwnode of a remote endpoint + * @fwnode: Endpoint firmware node pointing to the remote endpoint + * + * Extracts firmware node of a remote endpoint the @fwnode points to. 
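A sketch of walking a device's graph endpoints with the helpers introduced above and resolving the node on the remote side of each link; error handling is trimmed and the callback is hypothetical.

    #include <linux/device.h>
    #include <linux/property.h>

    static void example_scan_links(struct device *dev)
    {
            struct fwnode_handle *ep = NULL;
            struct fwnode_handle *remote;

            /* The OF/ACPI backends handle the refcounting on 'ep' across iterations. */
            while ((ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), ep))) {
                    remote = fwnode_graph_get_remote_port_parent(ep);
                    if (!remote)
                            continue;

                    /* ... look up or bind the remote device here ... */

                    fwnode_handle_put(remote);
            }
    }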
+ */ +struct fwnode_handle * +fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *endpoint = NULL; + + if (is_of_node(fwnode)) { + struct device_node *node; + + node = of_parse_phandle(to_of_node(fwnode), "remote-endpoint", + 0); + if (node) + endpoint = &node->fwnode; + } else if (is_acpi_node(fwnode)) { + int ret; + + ret = acpi_graph_get_remote_endpoint(fwnode, NULL, NULL, + &endpoint); + if (ret) + return NULL; + } + + return endpoint; +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint); + +/** + * fwnode_graph_parse_endpoint - parse common endpoint node properties + * @fwnode: pointer to endpoint fwnode_handle + * @endpoint: pointer to the fwnode endpoint data structure + * + * Parse @fwnode representing a graph endpoint node and store the + * information in @endpoint. The caller must hold a reference to + * @fwnode. + */ +int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint) +{ + struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode); + + memset(endpoint, 0, sizeof(*endpoint)); + + endpoint->local_fwnode = fwnode; + + if (is_acpi_node(port_fwnode)) { + fwnode_property_read_u32(port_fwnode, "port", &endpoint->port); + fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id); + } else { + fwnode_property_read_u32(port_fwnode, "reg", &endpoint->port); + fwnode_property_read_u32(fwnode, "reg", &endpoint->id); + } + + fwnode_handle_put(port_fwnode); + + return 0; +} +EXPORT_SYMBOL(fwnode_graph_parse_endpoint); diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index db9d00c36a3e..073c0b77e5b3 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -3,10 +3,13 @@ # subsystems should select the appropriate symbols. 
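Continuing the graph helpers from the property.c hunk above, a sketch of using fwnode_graph_parse_endpoint() to recover the local port and endpoint numbers (the DT "reg" cells or the ACPI "port"/"endpoint" properties, as implemented there). The dev_dbg() message is illustrative.

    #include <linux/device.h>
    #include <linux/property.h>

    static void example_show_endpoint(struct device *dev, struct fwnode_handle *ep)
    {
            struct fwnode_endpoint fwep;

            fwnode_graph_parse_endpoint(ep, &fwep);
            dev_dbg(dev, "port %u, endpoint %u\n", fwep.port, fwep.id);
    }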
config REGMAP - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ) + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ) + select IRQ_DOMAIN if REGMAP_IRQ + bool + +config REGCACHE_COMPRESSED select LZO_COMPRESS select LZO_DECOMPRESS - select IRQ_DOMAIN if REGMAP_IRQ bool config REGMAP_AC97 @@ -24,6 +27,10 @@ config REGMAP_SPMI tristate depends on SPMI +config REGMAP_W1 + tristate + depends on W1 + config REGMAP_MMIO tristate diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index 609e4c84f485..0cf4abc8fbf1 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -2,7 +2,8 @@ CFLAGS_regmap.o := -I$(src) obj-$(CONFIG_REGMAP) += regmap.o regcache.o -obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o +obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o +obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o @@ -10,3 +11,4 @@ obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o +obj-$(CONFIG_REGMAP_W1) += regmap-w1.o diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index b0a0dcf32fb7..773560348337 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -21,7 +21,9 @@ static const struct regcache_ops *cache_types[] = { ®cache_rbtree_ops, +#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED) ®cache_lzo_ops, +#endif ®cache_flat_ops, }; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index cd54189f2b1d..429ca8ed7e51 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -60,6 +60,16 @@ static void regmap_irq_lock(struct irq_data *data) mutex_lock(&d->lock); } +static int regmap_irq_update_bits(struct regmap_irq_chip_data *d, + unsigned int reg, unsigned int mask, + unsigned int val) +{ + if (d->chip->mask_writeonly) + return regmap_write_bits(d->map, reg, mask, val); + else + return regmap_update_bits(d->map, reg, mask, val); +} + static void regmap_irq_sync_unlock(struct irq_data *data) { struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); @@ -84,11 +94,11 @@ static void regmap_irq_sync_unlock(struct irq_data *data) reg = d->chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (d->chip->mask_invert) { - ret = regmap_update_bits(d->map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf_def[i], ~d->mask_buf[i]); } else if (d->chip->unmask_base) { /* set mask with mask_base register */ - ret = regmap_update_bits(d->map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf_def[i], ~d->mask_buf[i]); if (ret < 0) dev_err(d->map->dev, @@ -97,12 +107,12 @@ static void regmap_irq_sync_unlock(struct irq_data *data) unmask_offset = d->chip->unmask_base - d->chip->mask_base; /* clear mask with unmask_base register */ - ret = regmap_update_bits(d->map, + ret = regmap_irq_update_bits(d, reg + unmask_offset, d->mask_buf_def[i], d->mask_buf[i]); } else { - ret = regmap_update_bits(d->map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf_def[i], d->mask_buf[i]); } if (ret != 0) @@ -113,11 +123,11 @@ static void regmap_irq_sync_unlock(struct irq_data *data) (i * map->reg_stride * d->irq_reg_stride); if (d->wake_buf) { if (d->chip->wake_invert) - ret 
= regmap_update_bits(d->map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf_def[i], ~d->wake_buf[i]); else - ret = regmap_update_bits(d->map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf_def[i], d->wake_buf[i]); if (ret != 0) @@ -153,10 +163,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data) reg = d->chip->type_base + (i * map->reg_stride * d->type_reg_stride); if (d->chip->type_invert) - ret = regmap_update_bits(d->map, reg, + ret = regmap_irq_update_bits(d, reg, d->type_buf_def[i], ~d->type_buf[i]); else - ret = regmap_update_bits(d->map, reg, + ret = regmap_irq_update_bits(d, reg, d->type_buf_def[i], d->type_buf[i]); if (ret != 0) dev_err(d->map->dev, "Failed to sync type in %x\n", @@ -394,7 +404,7 @@ static int regmap_irq_map(struct irq_domain *h, unsigned int virq, static const struct irq_domain_ops regmap_domain_ops = { .map = regmap_irq_map, - .xlate = irq_domain_xlate_twocell, + .xlate = irq_domain_xlate_onetwocell, }; /** @@ -519,17 +529,17 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, reg = chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (chip->mask_invert) - ret = regmap_update_bits(map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf[i], ~d->mask_buf[i]); else if (d->chip->unmask_base) { unmask_offset = d->chip->unmask_base - d->chip->mask_base; - ret = regmap_update_bits(d->map, + ret = regmap_irq_update_bits(d, reg + unmask_offset, d->mask_buf[i], d->mask_buf[i]); } else - ret = regmap_update_bits(map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf[i], d->mask_buf[i]); if (ret != 0) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", @@ -575,11 +585,11 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, (i * map->reg_stride * d->irq_reg_stride); if (chip->wake_invert) - ret = regmap_update_bits(map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf_def[i], 0); else - ret = regmap_update_bits(map, reg, + ret = regmap_irq_update_bits(d, reg, d->mask_buf_def[i], d->wake_buf[i]); if (ret != 0) { @@ -603,10 +613,10 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, reg = chip->type_base + (i * map->reg_stride * d->type_reg_stride); if (chip->type_invert) - ret = regmap_update_bits(map, reg, + ret = regmap_irq_update_bits(d, reg, d->type_buf_def[i], 0xFF); else - ret = regmap_update_bits(map, reg, + ret = regmap_irq_update_bits(d, reg, d->type_buf_def[i], 0x0); if (ret != 0) { dev_err(map->dev, diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c new file mode 100644 index 000000000000..5f04e7bf063e --- /dev/null +++ b/drivers/base/regmap/regmap-w1.c @@ -0,0 +1,245 @@ +/* + * Register map access API - W1 (1-Wire) support + * + * Copyright (C) 2017 OAO Radioavionica + * Author: Alex A. 
Mihaylov <minimumlaw@rambler.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation + */ + +#include <linux/regmap.h> +#include <linux/module.h> +#include "../../w1/w1.h" + +#include "internal.h" + +#define W1_CMD_READ_DATA 0x69 +#define W1_CMD_WRITE_DATA 0x6C + +/* + * 1-Wire slaves registers with addess 8 bit and data 8 bit + */ + +static int w1_reg_a8_v8_read(void *context, unsigned int reg, unsigned int *val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_READ_DATA); + w1_write_8(sl->master, reg); + *val = w1_read_8(sl->master); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +static int w1_reg_a8_v8_write(void *context, unsigned int reg, unsigned int val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_WRITE_DATA); + w1_write_8(sl->master, reg); + w1_write_8(sl->master, val); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +/* + * 1-Wire slaves registers with addess 8 bit and data 16 bit + */ + +static int w1_reg_a8_v16_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_READ_DATA); + w1_write_8(sl->master, reg); + *val = w1_read_8(sl->master); + *val |= w1_read_8(sl->master)<<8; + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +static int w1_reg_a8_v16_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_WRITE_DATA); + w1_write_8(sl->master, reg); + w1_write_8(sl->master, val & 0x00FF); + w1_write_8(sl->master, val>>8 & 0x00FF); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +/* + * 1-Wire slaves registers with addess 16 bit and data 16 bit + */ + +static int w1_reg_a16_v16_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 65535) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_READ_DATA); + w1_write_8(sl->master, reg & 0x00FF); + w1_write_8(sl->master, reg>>8 & 0x00FF); + *val = w1_read_8(sl->master); + *val |= w1_read_8(sl->master)<<8; + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +static int w1_reg_a16_v16_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int 
ret = 0; + + if (reg > 65535) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_WRITE_DATA); + w1_write_8(sl->master, reg & 0x00FF); + w1_write_8(sl->master, reg>>8 & 0x00FF); + w1_write_8(sl->master, val & 0x00FF); + w1_write_8(sl->master, val>>8 & 0x00FF); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +/* + * Various types of supported bus addressing + */ + +static struct regmap_bus regmap_w1_bus_a8_v8 = { + .reg_read = w1_reg_a8_v8_read, + .reg_write = w1_reg_a8_v8_write, +}; + +static struct regmap_bus regmap_w1_bus_a8_v16 = { + .reg_read = w1_reg_a8_v16_read, + .reg_write = w1_reg_a8_v16_write, +}; + +static struct regmap_bus regmap_w1_bus_a16_v16 = { + .reg_read = w1_reg_a16_v16_read, + .reg_write = w1_reg_a16_v16_write, +}; + +static const struct regmap_bus *regmap_get_w1_bus(struct device *w1_dev, + const struct regmap_config *config) +{ + if (config->reg_bits == 8 && config->val_bits == 8) + return ®map_w1_bus_a8_v8; + + if (config->reg_bits == 8 && config->val_bits == 16) + return ®map_w1_bus_a8_v16; + + if (config->reg_bits == 16 && config->val_bits == 16) + return ®map_w1_bus_a16_v16; + + return ERR_PTR(-ENOTSUPP); +} + +struct regmap *__regmap_init_w1(struct device *w1_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + + const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __regmap_init(w1_dev, bus, w1_dev, config, + lock_key, lock_name); + + return NULL; +} +EXPORT_SYMBOL_GPL(__regmap_init_w1); + +struct regmap *__devm_regmap_init_w1(struct device *w1_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + + const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __devm_regmap_init(w1_dev, bus, w1_dev, config, + lock_key, lock_name); + + return NULL; +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_w1); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/soc.c b/drivers/base/soc.c index dc26e5949a32..909dedae4c4e 100644 --- a/drivers/base/soc.c +++ b/drivers/base/soc.c @@ -109,15 +109,18 @@ static void soc_release(struct device *dev) kfree(soc_dev); } +static struct soc_device_attribute *early_soc_dev_attr; + struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr) { struct soc_device *soc_dev; int ret; if (!soc_bus_type.p) { - ret = bus_register(&soc_bus_type); - if (ret) - goto out1; + if (early_soc_dev_attr) + return ERR_PTR(-EBUSY); + early_soc_dev_attr = soc_dev_attr; + return NULL; } soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL); @@ -159,45 +162,53 @@ void soc_device_unregister(struct soc_device *soc_dev) ida_simple_remove(&soc_ida, soc_dev->soc_dev_num); device_unregister(&soc_dev->dev); + early_soc_dev_attr = NULL; } static int __init soc_bus_register(void) { - if (soc_bus_type.p) - return 0; + int ret; - return bus_register(&soc_bus_type); + ret = bus_register(&soc_bus_type); + if (ret) + return ret; + + if (early_soc_dev_attr) + return PTR_ERR(soc_device_register(early_soc_dev_attr)); + + return 0; } core_initcall(soc_bus_register); -static int soc_device_match_one(struct device *dev, void *arg) +static int soc_device_match_attr(const struct soc_device_attribute *attr, + const struct soc_device_attribute *match) { - struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); 
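A sketch of how a 1-Wire slave driver might sit on top of the new W1 regmap bus: an 8-bit address / 8-bit value config selects the a8_v8 helpers defined above. The devm_regmap_init_w1() wrapper macro is assumed to be provided in regmap.h alongside the other bus wrappers, and w1_dev is the slave's struct device (e.g. &sl->dev in a w1 family driver).

    #include <linux/regmap.h>

    static const struct regmap_config example_w1_regmap_config = {
            .reg_bits = 8,          /* selects regmap_w1_bus_a8_v8 */
            .val_bits = 8,
            .max_register = 0xff,
    };

    static int example_w1_setup(struct device *w1_dev)
    {
            struct regmap *map;

            map = devm_regmap_init_w1(w1_dev, &example_w1_regmap_config);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            /* 8/16 and 16/16 register layouts pick the other two buses above. */
            return regmap_write(map, 0x01, 0xaa);
    }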
- const struct soc_device_attribute *match = arg; - if (match->machine && - (!soc_dev->attr->machine || - !glob_match(match->machine, soc_dev->attr->machine))) + (!attr->machine || !glob_match(match->machine, attr->machine))) return 0; if (match->family && - (!soc_dev->attr->family || - !glob_match(match->family, soc_dev->attr->family))) + (!attr->family || !glob_match(match->family, attr->family))) return 0; if (match->revision && - (!soc_dev->attr->revision || - !glob_match(match->revision, soc_dev->attr->revision))) + (!attr->revision || !glob_match(match->revision, attr->revision))) return 0; if (match->soc_id && - (!soc_dev->attr->soc_id || - !glob_match(match->soc_id, soc_dev->attr->soc_id))) + (!attr->soc_id || !glob_match(match->soc_id, attr->soc_id))) return 0; return 1; } +static int soc_device_match_one(struct device *dev, void *arg) +{ + struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); + + return soc_device_match_attr(soc_dev->attr, arg); +} + /* * soc_device_match - identify the SoC in the machine * @matches: zero-terminated array of possible matches @@ -230,6 +241,11 @@ const struct soc_device_attribute *soc_device_match( break; ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches, soc_device_match_one); + if (ret < 0 && early_soc_dev_attr) + ret = soc_device_match_attr(early_soc_dev_attr, + matches); + if (ret < 0) + return NULL; if (!ret) matches++; else |
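Finally, a sketch of the consumer side of the soc.c rework above: soc_device_match() takes a zero-terminated table of glob patterns and, with the early_soc_dev_attr fallback, now works even before the SoC bus itself has been registered. The family/revision strings below are invented for illustration.

    #include <linux/device.h>
    #include <linux/sys_soc.h>

    static const struct soc_device_attribute example_quirk_socs[] = {
            { .family = "Example SoC", .revision = "1.*" },
            { /* sentinel */ }
    };

    static void example_apply_quirks(struct device *dev)
    {
            /* soc_device_match() returns the matching entry, or NULL if none matched. */
            if (soc_device_match(example_quirk_socs))
                    dev_info(dev, "applying rev 1.x workaround\n");
    }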