Diffstat (limited to 'drivers/base')
42 files changed, 1251 insertions, 487 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 7fb21768ca36..8074a10183dc 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o \ topology.o container.o property.o cacheinfo.o \ - swnode.o + swnode.o faux.o obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o obj-y += power/ diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 3ebe77566788..af0029d30dbe 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -11,6 +11,7 @@ #include <linux/cleanup.h> #include <linux/cpu.h> #include <linux/cpufreq.h> +#include <linux/cpu_smt.h> #include <linux/device.h> #include <linux/of.h> #include <linux/slab.h> @@ -28,7 +29,7 @@ static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data); static struct cpumask scale_freq_counters_mask; static bool scale_freq_invariant; -DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1; +DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0; EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref); static bool supports_scale_freq_counters(const struct cpumask *cpus) @@ -293,13 +294,15 @@ void topology_normalize_cpu_scale(void) capacity_scale = 1; for_each_possible_cpu(cpu) { - capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); + capacity = raw_capacity[cpu] * + (per_cpu(capacity_freq_ref, cpu) ?: 1); capacity_scale = max(capacity, capacity_scale); } pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale); for_each_possible_cpu(cpu) { - capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); + capacity = raw_capacity[cpu] * + (per_cpu(capacity_freq_ref, cpu) ?: 1); capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, capacity_scale); topology_set_cpu_scale(cpu, capacity); @@ -506,6 +509,10 @@ core_initcall(free_raw_capacity); #endif #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) + +/* Used to enable SMT control */ +static unsigned int max_smt_thread_num = 1; + /* * This function returns the logical cpu number of the node. * There are basically three kinds of return values: @@ -565,6 +572,8 @@ static int __init parse_core(struct device_node *core, int package_id, i++; } while (1); + max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i); + cpu = get_cpu_for_node(core); if (cpu >= 0) { if (!leaf) { @@ -677,6 +686,17 @@ static int __init parse_socket(struct device_node *socket) if (!has_socket) ret = parse_cluster(socket, 0, -1, 0); + /* + * Reset max_smt_thread_num to 1 on failure: we need to notify the + * framework that SMT is not supported, but max_smt_thread_num may + * already have been set to the SMT thread number of the cores that + * were successfully parsed. + */ + if (ret) + max_smt_thread_num = 1; + + cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num); + return ret; } diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index afa4df4c5a3f..95717d509ca9 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -156,6 +156,16 @@ * }, * .ops = my_custom_ops, * }; + * + * Please note that such a custom ops approach is valid, but it is hard to + * implement correctly without per-device global locks to protect against + * auxiliary_drv removal during a call to those ops. In addition, this + * implementation lacks proper module dependencies, which causes load/unload + * races between the auxiliary parent and device modules.
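The comment goes on to recommend exporting such ops directly instead. A minimal sketch of that alternative, with hypothetical names (my_aux_do_thing, my_aux_probe) that are not part of this patch:

	#include <linux/auxiliary_bus.h>
	#include <linux/export.h>

	/* In the parent module: an exported helper instead of an ops pointer
	 * stashed in driver_data. */
	int my_aux_do_thing(struct auxiliary_device *adev)
	{
		/* operate on the shared parent hardware on behalf of adev */
		return 0;
	}
	EXPORT_SYMBOL_GPL(my_aux_do_thing);

	/* In the auxiliary driver: a direct call. Symbol resolution makes the
	 * module loader pin the parent module for as long as this module is
	 * loaded, so no per-device locking is needed. */
	static int my_aux_probe(struct auxiliary_device *adev,
				const struct auxiliary_device_id *id)
	{
		return my_aux_do_thing(adev);
	}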
+ * + * The easiest way to provide these ops reliably without needing to + * have a lock is to EXPORT_SYMBOL*() them and rely on the already existing + * module infrastructure for validity and correct dependency chains. */ static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, diff --git a/drivers/base/base.h b/drivers/base/base.h index 8cf04a557bdb..123031a757d9 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp) kset_put(&sp->subsys); } +struct subsys_private *bus_to_subsys(const struct bus_type *bus); struct subsys_private *class_to_subsys(const struct class *class); struct driver_private { @@ -137,6 +138,7 @@ int hypervisor_init(void); static inline int hypervisor_init(void) { return 0; } #endif int platform_bus_init(void); +int faux_bus_init(void); void cpu_dev_init(void); void container_dev_init(void); #ifdef CONFIG_AUXILIARY_BUS @@ -179,6 +181,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups); void device_driver_detach(struct device *dev); +static inline void device_set_driver(struct device *dev, const struct device_driver *drv) +{ + /* + * The majority (all?) of read accesses to dev->driver happen either + * while holding the device lock or in bus/driver code that is only + * invoked when the device is bound to a driver and there is no + * concern of the pointer being changed while it is being read. + * However, when reading the device's uevent file we read the driver + * pointer without taking the device lock (so we do not block there + * for an arbitrary amount of time). We use WRITE_ONCE() here to + * prevent tearing so that READ_ONCE() can safely be used in uevent + * code. + */ + // FIXME - this cast should not be needed "soon" + WRITE_ONCE(dev->driver, (struct device_driver *)drv); +} + int devres_release_all(struct device *dev); void device_block_probing(void); void device_unblock_probing(void); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 657c93c38b0d..5e75e1bce551 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, * NULL. A call to subsys_put() must be done when finished with the pointer in * order for it to be properly freed. */ -static struct subsys_private *bus_to_subsys(const struct bus_type *bus) +struct subsys_private *bus_to_subsys(const struct bus_type *bus) { struct subsys_private *sp = NULL; struct kobject *kobj; @@ -354,7 +354,7 @@ static struct device *next_device(struct klist_iter *i) * count in the supplied callback. */ int bus_for_each_dev(const struct bus_type *bus, struct device *start, - void *data, int (*fn)(struct device *, void *)) + void *data, device_iter_t fn) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; @@ -402,9 +402,12 @@ struct device *bus_find_device(const struct bus_type *bus, klist_iter_init_node(&sp->klist_devices, &i, (start ?
&start->p->knode_bus : NULL)); - while ((dev = next_device(&i))) - if (match(dev, data) && get_device(dev)) + while ((dev = next_device(&i))) { + if (match(dev, data)) { + get_device(dev); break; + } + } klist_iter_exit(&i); subsys_put(sp); return dev; @@ -1288,7 +1291,7 @@ EXPORT_SYMBOL_GPL(subsys_system_register); * @groups: default attributes for the root device * * All 'virtual' subsystems have a /sys/devices/system/<name> root device - * with the name of the subystem. The root device can carry subsystem-wide + * with the name of the subsystem. The root device can carry subsystem-wide * attributes. All registered devices are below this single root device. * There's no restriction on device naming. This is for kernel software * constructs which need sysfs interface. diff --git a/drivers/base/class.c b/drivers/base/class.c index d57f277978dc..2526c57d924e 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -402,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit); * code. There's no locking restriction. */ int class_for_each_device(const struct class *class, const struct device *start, - void *data, int (*fn)(struct device *, void *)) + void *data, device_iter_t fn) { struct subsys_private *sp = class_to_subsys(class); struct class_dev_iter iter; @@ -601,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister); * a bus device * @cls: the compatibility class * @dev: the target bus device - * @device_link: an optional device to which a "device" link should be created */ -int class_compat_create_link(struct class_compat *cls, struct device *dev, - struct device *device_link) +int class_compat_create_link(struct class_compat *cls, struct device *dev) { - int error; - - error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev)); - if (error) - return error; - - /* - * Optionally add a "device" link (typically to the parent), as a - * class device would have one and we want to provide as much - * backwards compatibility as possible. - */ - if (device_link) { - error = sysfs_create_link(&dev->kobj, &device_link->kobj, - "device"); - if (error) - sysfs_remove_link(cls->kobj, dev_name(dev)); - } - - return error; + return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev)); } EXPORT_SYMBOL_GPL(class_compat_create_link); @@ -633,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link); * a bus device * @cls: the compatibility class * @dev: the target bus device - * @device_link: an optional device to which a "device" link was previously - * created */ -void class_compat_remove_link(struct class_compat *cls, struct device *dev, - struct device *device_link) +void class_compat_remove_link(struct class_compat *cls, struct device *dev) { - if (device_link) - sysfs_remove_link(&dev->kobj, "device"); sysfs_remove_link(cls->kobj, dev_name(dev)); } EXPORT_SYMBOL_GPL(class_compat_remove_link); diff --git a/drivers/base/component.c b/drivers/base/component.c index 741497324d78..abe60eb45c55 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -87,17 +87,17 @@ static int component_devices_show(struct seq_file *s, void *data) size_t i; mutex_lock(&component_mutex); - seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status"); - seq_puts(s, "-------------------------------------------------------------\n"); - seq_printf(s, "%-40s %20s\n\n", + seq_printf(s, "%-50s %20s\n", "aggregate_device name", "status"); + seq_puts(s, "-----------------------------------------------------------------------\n"); + seq_printf(s, "%-50s %20s\n\n", dev_name(m->parent), m->bound ? 
"bound" : "not bound"); - seq_printf(s, "%-40s %20s\n", "device name", "status"); - seq_puts(s, "-------------------------------------------------------------\n"); + seq_printf(s, "%-50s %20s\n", "device name", "status"); + seq_puts(s, "-----------------------------------------------------------------------\n"); for (i = 0; i < match->num; i++) { struct component *component = match->compare[i].component; - seq_printf(s, "%-40s %20s\n", + seq_printf(s, "%-50s %20s\n", component ? dev_name(component->dev) : "(unknown)", component ? (component->bound ? "bound" : "not bound") : "not registered"); } @@ -569,11 +569,28 @@ void component_master_del(struct device *parent, } EXPORT_SYMBOL_GPL(component_master_del); +bool component_master_is_bound(struct device *parent, + const struct component_master_ops *ops) +{ + struct aggregate_device *adev; + + guard(mutex)(&component_mutex); + adev = __aggregate_find(parent, ops); + if (!adev) + return 0; + + return adev->bound; +} +EXPORT_SYMBOL_GPL(component_master_is_bound); + static void component_unbind(struct component *component, struct aggregate_device *adev, void *data) { WARN_ON(!component->bound); + dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n", + dev_name(component->dev), component, component->ops); + if (component->ops && component->ops->unbind) component->ops->unbind(component->dev, adev->parent, data); component->bound = false; diff --git a/drivers/base/core.c b/drivers/base/core.c index 70dbf8706980..cbc0099d8ef2 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj) return NULL; } +/* + * Try filling "DRIVER=<name>" uevent variable for a device. Because this + * function may race with binding and unbinding the device from a driver, + * we need to be careful. Binding is generally safe, at worst we miss the + * fact that the device is already bound to a driver (but the driver + * information that is delivered through uevents is best-effort, it may + * become obsolete as soon as it is generated anyways). Unbinding is more + * risky as driver pointer is transitioning to NULL, so READ_ONCE() should + * be used to make sure we are dealing with the same pointer, and to + * ensure that driver structure is not going to disappear from under us + * we take bus' drivers klist lock. The assumption that only registered + * driver can be bound to a device, and to unregister a driver bus code + * will take the same lock. 
+ */ +static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + struct subsys_private *sp = bus_to_subsys(dev->bus); + + if (sp) { + scoped_guard(spinlock, &sp->klist_drivers.k_lock) { + struct device_driver *drv = READ_ONCE(dev->driver); + if (drv) + add_uevent_var(env, "DRIVER=%s", drv->name); + } + + subsys_put(sp); + } +} + static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct device *dev = kobj_to_dev(kobj); @@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); - if (dev->driver) - add_uevent_var(env, "DRIVER=%s", dev->driver->name); + /* Add "DRIVER=%s" variable if the device is bound to a driver */ + dev_driver_uevent(dev, env); /* Add common DT information about the device */ of_device_uevent(dev, env); @@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; - /* Synchronize with really_probe() */ - device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); - device_unlock(dev); if (retval) goto out; @@ -3700,7 +3726,7 @@ done: device_pm_remove(dev); dpm_sysfs_remove(dev); DPMError: - dev->driver = NULL; + device_set_driver(dev, NULL); bus_remove_device(dev); BusError: device_remove_attrs(dev); @@ -3981,7 +4007,7 @@ const char *device_get_devnode(const struct device *dev, * other than 0, we break out and return that value. */ int device_for_each_child(struct device *parent, void *data, - int (*fn)(struct device *dev, void *data)) + device_iter_t fn) { struct klist_iter i; struct device *child; @@ -4011,7 +4037,7 @@ EXPORT_SYMBOL_GPL(device_for_each_child); * other than 0, we break out and return that value. */ int device_for_each_child_reverse(struct device *parent, void *data, - int (*fn)(struct device *dev, void *data)) + device_iter_t fn) { struct klist_iter i; struct device *child; @@ -4044,14 +4070,14 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse); * device_for_each_child_reverse_from(); */ int device_for_each_child_reverse_from(struct device *parent, - struct device *from, const void *data, - int (*fn)(struct device *, const void *)) + struct device *from, void *data, + device_iter_t fn) { struct klist_iter i; struct device *child; int error = 0; - if (!parent->p) + if (!parent || !parent->p) return 0; klist_iter_init_node(&parent->p->klist_children, &i, @@ -4080,8 +4106,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from); * * NOTE: you will need to drop the reference with put_device() after use. */ -struct device *device_find_child(struct device *parent, void *data, - int (*match)(struct device *dev, void *data)) +struct device *device_find_child(struct device *parent, const void *data, + device_match_t match) { struct klist_iter i; struct device *child; @@ -4090,62 +4116,17 @@ struct device *device_find_child(struct device *parent, void *data, return NULL; klist_iter_init(&parent->p->klist_children, &i); - while ((child = next_device(&i))) - if (match(child, data) && get_device(child)) + while ((child = next_device(&i))) { + if (match(child, data)) { + get_device(child); break; + } + } klist_iter_exit(&i); return child; } EXPORT_SYMBOL_GPL(device_find_child); -/** - * device_find_child_by_name - device iterator for locating a child device. 
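With device_find_child() now taking a const void * and a device_match_t, the by-name and any-child helpers being removed below can be expressed with the stock match functions. A sketch, where parent and the name "foo" are illustrative:

	#include <linux/device.h>

	static void example_lookup(struct device *parent)
	{
		/* equivalent of the removed device_find_child_by_name(parent, "foo") */
		struct device *child = device_find_child(parent, "foo", device_match_name);
		/* equivalent of the removed device_find_any_child(parent) */
		struct device *any = device_find_child(parent, NULL, device_match_any);

		/* drop the reference taken on a match */
		if (child)
			put_device(child);
		if (any)
			put_device(any);
	}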
- * @parent: parent struct device - * @name: name of the child device - * - * This is similar to the device_find_child() function above, but it - * returns a reference to a device that has the name @name. - * - * NOTE: you will need to drop the reference with put_device() after use. - */ -struct device *device_find_child_by_name(struct device *parent, - const char *name) -{ - struct klist_iter i; - struct device *child; - - if (!parent) - return NULL; - - klist_iter_init(&parent->p->klist_children, &i); - while ((child = next_device(&i))) - if (sysfs_streq(dev_name(child), name) && get_device(child)) - break; - klist_iter_exit(&i); - return child; -} -EXPORT_SYMBOL_GPL(device_find_child_by_name); - -static int match_any(struct device *dev, void *unused) -{ - return 1; -} - -/** - * device_find_any_child - device iterator for locating a child device, if any. - * @parent: parent struct device - * - * This is similar to the device_find_child() function above, but it - * returns a reference to a child device, if any. - * - * NOTE: you will need to drop the reference with put_device() after use. - */ -struct device *device_find_any_child(struct device *parent) -{ - return device_find_child(parent, NULL, match_any); -} -EXPORT_SYMBOL_GPL(device_find_any_child); - int __init devices_init(void) { devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); @@ -5217,6 +5198,67 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) EXPORT_SYMBOL_GPL(set_secondary_fwnode); /** + * device_remove_of_node - Remove an of_node from a device + * @dev: device whose device tree node is being removed + */ +void device_remove_of_node(struct device *dev) +{ + dev = get_device(dev); + if (!dev) + return; + + if (!dev->of_node) + goto end; + + if (dev->fwnode == of_fwnode_handle(dev->of_node)) + dev->fwnode = NULL; + + of_node_put(dev->of_node); + dev->of_node = NULL; + +end: + put_device(dev); +} +EXPORT_SYMBOL_GPL(device_remove_of_node); + +/** + * device_add_of_node - Add an of_node to an existing device + * @dev: device whose device tree node is being added + * @of_node: of_node to add + * + * Return: 0 on success or error code on failure. 
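A usage sketch for the pair of helpers documented above; the device-tree path and function name are hypothetical:

	#include <linux/of.h>

	static int example_attach_node(struct device *dev)
	{
		struct device_node *np = of_find_node_by_path("/soc/my-node"); /* hypothetical path */
		int err = 0;

		if (np) {
			err = device_add_of_node(dev, np);
			/* device_add_of_node() takes its own reference */
			of_node_put(np);
		}
		return err;
	}

	/* ... and before the device is deleted: device_remove_of_node(dev); */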
+ */ +int device_add_of_node(struct device *dev, struct device_node *of_node) +{ + int ret; + + if (!of_node) + return -EINVAL; + + dev = get_device(dev); + if (!dev) + return -EINVAL; + + if (dev->of_node) { + dev_err(dev, "Cannot replace node %pOF with %pOF\n", + dev->of_node, of_node); + ret = -EBUSY; + goto end; + } + + dev->of_node = of_node_get(of_node); + + if (!dev->fwnode) + dev->fwnode = of_fwnode_handle(of_node); + + ret = 0; +end: + put_device(dev); + return ret; +} +EXPORT_SYMBOL_GPL(device_add_of_node); + +/** * device_set_of_node_from_dev - reuse device-tree node of another device * @dev: device whose device-tree node is being set * @dev2: device whose device-tree node is being reused @@ -5245,15 +5287,21 @@ int device_match_name(struct device *dev, const void *name) } EXPORT_SYMBOL_GPL(device_match_name); +int device_match_type(struct device *dev, const void *type) +{ + return dev->type == type; +} +EXPORT_SYMBOL_GPL(device_match_type); + int device_match_of_node(struct device *dev, const void *np) { - return dev->of_node == np; + return np && dev->of_node == np; } EXPORT_SYMBOL_GPL(device_match_of_node); int device_match_fwnode(struct device *dev, const void *fwnode) { - return dev_fwnode(dev) == fwnode; + return fwnode && dev_fwnode(dev) == fwnode; } EXPORT_SYMBOL_GPL(device_match_fwnode); @@ -5265,13 +5313,13 @@ EXPORT_SYMBOL_GPL(device_match_devt); int device_match_acpi_dev(struct device *dev, const void *adev) { - return ACPI_COMPANION(dev) == adev; + return adev && ACPI_COMPANION(dev) == adev; } EXPORT_SYMBOL(device_match_acpi_dev); int device_match_acpi_handle(struct device *dev, const void *handle) { - return ACPI_HANDLE(dev) == handle; + return handle && ACPI_HANDLE(dev) == handle; } EXPORT_SYMBOL(device_match_acpi_handle); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index fdaa24bb641a..50651435577c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -599,6 +599,8 @@ CPU_SHOW_VULN_FALLBACK(retbleed); CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); CPU_SHOW_VULN_FALLBACK(gds); CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); +CPU_SHOW_VULN_FALLBACK(ghostwrite); +CPU_SHOW_VULN_FALLBACK(indirect_target_selection); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -614,6 +616,8 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); +static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL); +static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -630,6 +634,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_spec_rstack_overflow.attr, &dev_attr_gather_data_sampling.attr, &dev_attr_reg_file_data_sampling.attr, + &dev_attr_ghostwrite.attr, + &dev_attr_indirect_target_selection.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index f0e4b4aba885..b526e0e0f52d 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev) arch_teardown_dma_ops(dev); kfree(dev->dma_range_map); dev->dma_range_map = NULL; - dev->driver = NULL; + device_set_driver(dev, NULL); dev_set_drvdata(dev, NULL); if 
(dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); @@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv) } re_probe: - // FIXME - this cast should not be needed "soon" - dev->driver = (struct device_driver *)drv; + device_set_driver(dev, drv); /* If using pinctrl, bind pins now before probing */ ret = pinctrl_bind_pins(dev); @@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async) if (ret == 0) ret = 1; else { - dev->driver = NULL; + device_set_driver(dev, NULL); ret = 0; } } else { diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index c795edad1b96..03a39c417dc4 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -41,7 +41,7 @@ struct devcd_entry { * devcd_data_write() * mod_delayed_work() * try_to_grab_pending() - * del_timer() + * timer_delete() * debug_assert_init() * INIT_DELAYED_WORK() * schedule_delayed_work() @@ -106,7 +106,7 @@ static void devcd_del(struct work_struct *wk) } static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -116,7 +116,7 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, } static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -132,19 +132,15 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute devcd_attr_data = { - .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, }, - .size = 0, - .read = devcd_data_read, - .write = devcd_data_write, -}; +static const struct bin_attribute devcd_attr_data = + __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0); -static struct bin_attribute *devcd_dev_bin_attrs[] = { +static const struct bin_attribute *const devcd_dev_bin_attrs[] = { &devcd_attr_data, NULL, }; static const struct attribute_group devcd_dev_group = { - .bin_attrs = devcd_dev_bin_attrs, + .bin_attrs_new = devcd_dev_bin_attrs, }; static const struct attribute_group *devcd_dev_groups[] = { @@ -186,9 +182,9 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri * mutex_lock(&devcd->mutex); * * - * In the above diagram, It looks like disabled_store() would be racing with parallely + * In the above diagram, it looks like disabled_store() would be racing with parallelly * running devcd_del() and result in a memory abort while acquiring devcd->mutex, which - * is called after kfree of devcd memory after dropping its last reference with + * is called after kfree of devcd memory after dropping its last reference with * put_device(). However, this will not happen, as fn(dev, data) runs * with its own reference to the device via klist_node, so it is not its last reference. * So, the above situation would not occur.
@@ -285,6 +281,8 @@ static void devcd_free_sgtable(void *data) * @offset: start copy from @offset@ bytes from the head of the data * in the given scatterlist * @data_len: the length of the data in the sg_table + * + * Returns: the number of bytes copied */ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, size_t buf_len, void *data, diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 2152eec0c135..d8a733ea5e1a 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -576,7 +576,10 @@ void *devres_open_group(struct device *dev, void *id, gfp_t gfp) } EXPORT_SYMBOL_GPL(devres_open_group); -/* Find devres group with ID @id. If @id is NULL, look for the latest. */ +/* + * Find devres group with ID @id. If @id is NULL, look for the latest open + * group. + */ static struct devres_group *find_group(struct device *dev, void *id) { struct devres_node *node; @@ -687,6 +690,13 @@ int devres_release_group(struct device *dev, void *id) spin_unlock_irqrestore(&dev->devres_lock, flags); release_nodes(dev, &todo); + } else if (list_empty(&dev->devres_head)) { + /* + * dev is probably dying via devres_release_all(): groups + * have already been removed and are in the process of + * being released - don't touch and don't warn. + */ + spin_unlock_irqrestore(&dev->devres_lock, flags); } else { WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); @@ -750,25 +760,38 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co EXPORT_SYMBOL_GPL(__devm_add_action); /** - * devm_remove_action() - removes previously added custom action + * devm_remove_action_nowarn() - removes previously added custom action * @dev: Device that owns the action * @action: Function implementing the action * @data: Pointer to data passed to @action implementation * * Removes instance of @action previously added by devm_add_action(). * Both action and data should match one of the existing entries. + * + * In contrast to devm_remove_action(), this function does not WARN() if no + * entry could be found. + * + * This should only be used if the action is contained in an object with + * independent lifetime management, e.g. the Devres Rust abstraction. + * + * Causing the warning from regular driver code most likely indicates an abuse + * of the devres API. + * + * Returns: 0 on success, -ENOENT if no entry could be found.
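For illustration, an action owned by an independently managed object might be torn down like this; my_release and the data pointer are hypothetical:

	static void my_release(void *data)
	{
		/* undo whatever the paired devm_add_action_or_reset() set up */
	}

	static int example_setup(struct device *dev, void *data)
	{
		return devm_add_action_or_reset(dev, my_release, data);
	}

	static void example_teardown(struct device *dev, void *data)
	{
		/* unlike devm_remove_action(), a missing entry does not WARN() */
		if (devm_remove_action_nowarn(dev, my_release, data) == -ENOENT) {
			/* already released through devres teardown elsewhere */
		}
	}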
*/ -void devm_remove_action(struct device *dev, void (*action)(void *), void *data) +int devm_remove_action_nowarn(struct device *dev, + void (*action)(void *), + void *data) { struct action_devres devres = { .data = data, .action = action, }; - WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match, - &devres)); + return devres_destroy(dev, devm_action_release, devm_action_match, + &devres); } -EXPORT_SYMBOL_GPL(devm_remove_action); +EXPORT_SYMBOL_GPL(devm_remove_action_nowarn); /** * devm_release_action() - release previously added custom action diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index b848764ef018..31bfb3194b4c 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param); static struct vfsmount *mnt; -static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) -{ - struct super_block *s = mnt->mnt_sb; - int err; - - atomic_inc(&s->s_active); - down_write(&s->s_umount); - err = reconfigure_single(s, flags, data); - if (err < 0) { - deactivate_locked_super(s); - return ERR_PTR(err); - } - return dget(s->s_root); -} - static struct file_system_type internal_fs_type = { .name = "devtmpfs", #ifdef CONFIG_TMPFS @@ -89,9 +73,40 @@ static struct file_system_type internal_fs_type = { .kill_sb = kill_litter_super, }; +/* Simply take a ref on the existing mount */ +static int devtmpfs_get_tree(struct fs_context *fc) +{ + struct super_block *sb = mnt->mnt_sb; + + atomic_inc(&sb->s_active); + down_write(&sb->s_umount); + fc->root = dget(sb->s_root); + return 0; +} + +/* Ops are filled in during init depending on underlying shmem or ramfs type */ +struct fs_context_operations devtmpfs_context_ops = {}; + +/* Call the underlying initialization and set to our ops */ +static int devtmpfs_init_fs_context(struct fs_context *fc) +{ + int ret; +#ifdef CONFIG_TMPFS + ret = shmem_init_fs_context(fc); +#else + ret = ramfs_init_fs_context(fc); +#endif + if (ret < 0) + return ret; + + fc->ops = &devtmpfs_context_ops; + + return 0; +} + static struct file_system_type dev_fs_type = { .name = "devtmpfs", - .mount = public_dev_mount, + .init_fs_context = devtmpfs_init_fs_context, }; static int devtmpfs_submit_req(struct req *req, const char *tmp) @@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode) { struct dentry *dentry; struct path path; - int err; dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); if (IS_ERR(dentry)) return PTR_ERR(dentry); - err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode); - if (!err) + dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode); + if (!IS_ERR(dentry)) /* mark as kernel-created inode */ d_inode(dentry)->i_private = &thread; done_path_create(&path, dentry); - return err; + return PTR_ERR_OR_ZERO(dentry); } static int create_path(const char *nodepath) @@ -245,15 +259,12 @@ static int dev_rmdir(const char *name) dentry = kern_path_locked(name, &parent); if (IS_ERR(dentry)) return PTR_ERR(dentry); - if (d_really_is_positive(dentry)) { - if (d_inode(dentry)->i_private == &thread) - err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry), - dentry); - else - err = -EPERM; - } else { - err = -ENOENT; - } + if (d_inode(dentry)->i_private == &thread) + err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry), + dentry); + else + err = -EPERM; + dput(dentry); inode_unlock(d_inode(parent.dentry)); path_put(&parent); @@ -285,7 +296,7 @@ static int 
delete_path(const char *nodepath) return err; } -static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat) +static int dev_mynode(struct device *dev, struct inode *inode) { /* did we create it */ if (inode->i_private != &thread) @@ -293,13 +304,13 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta /* does the dev_t match */ if (is_blockdev(dev)) { - if (!S_ISBLK(stat->mode)) + if (!S_ISBLK(inode->i_mode)) return 0; } else { - if (!S_ISCHR(stat->mode)) + if (!S_ISCHR(inode->i_mode)) return 0; } - if (stat->rdev != dev->devt) + if (inode->i_rdev != dev->devt) return 0; /* ours */ @@ -310,39 +321,33 @@ static int handle_remove(const char *nodename, struct device *dev) { struct path parent; struct dentry *dentry; + struct inode *inode; int deleted = 0; - int err; + int err = 0; dentry = kern_path_locked(nodename, &parent); if (IS_ERR(dentry)) return PTR_ERR(dentry); - if (d_really_is_positive(dentry)) { - struct kstat stat; - struct path p = {.mnt = parent.mnt, .dentry = dentry}; - err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE, - AT_STATX_SYNC_AS_STAT); - if (!err && dev_mynode(dev, d_inode(dentry), &stat)) { - struct iattr newattrs; - /* - * before unlinking this node, reset permissions - * of possible references like hardlinks - */ - newattrs.ia_uid = GLOBAL_ROOT_UID; - newattrs.ia_gid = GLOBAL_ROOT_GID; - newattrs.ia_mode = stat.mode & ~0777; - newattrs.ia_valid = - ATTR_UID|ATTR_GID|ATTR_MODE; - inode_lock(d_inode(dentry)); - notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL); - inode_unlock(d_inode(dentry)); - err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry), - dentry, NULL); - if (!err || err == -ENOENT) - deleted = 1; - } - } else { - err = -ENOENT; + inode = d_inode(dentry); + if (dev_mynode(dev, inode)) { + struct iattr newattrs; + /* + * before unlinking this node, reset permissions + * of possible references like hardlinks + */ + newattrs.ia_uid = GLOBAL_ROOT_UID; + newattrs.ia_gid = GLOBAL_ROOT_GID; + newattrs.ia_mode = inode->i_mode & ~0777; + newattrs.ia_valid = + ATTR_UID|ATTR_GID|ATTR_MODE; + inode_lock(d_inode(dentry)); + notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL); + inode_unlock(d_inode(dentry)); + err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry), + dentry, NULL); + if (!err || err == -ENOENT) + deleted = 1; } dput(dentry); inode_unlock(d_inode(parent.dentry)); @@ -443,6 +448,31 @@ static int __ref devtmpfsd(void *p) } /* + * Get the underlying (shmem/ramfs) context ops to build ours + */ +static int devtmpfs_configure_context(void) +{ + struct fs_context *fc; + + fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags, + MS_RMT_MASK); + if (IS_ERR(fc)) + return PTR_ERR(fc); + + /* Set up devtmpfs_context_ops based on underlying type */ + devtmpfs_context_ops.free = fc->ops->free; + devtmpfs_context_ops.dup = fc->ops->dup; + devtmpfs_context_ops.parse_param = fc->ops->parse_param; + devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic; + devtmpfs_context_ops.get_tree = &devtmpfs_get_tree; + devtmpfs_context_ops.reconfigure = fc->ops->reconfigure; + + put_fs_context(fc); + + return 0; +} + +/* * Create devtmpfs instance, driver-core devices will add their device * nodes here. 
*/ @@ -456,6 +486,13 @@ int __init devtmpfs_init(void) pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt)); return PTR_ERR(mnt); } + + err = devtmpfs_configure_context(); + if (err) { + pr_err("unable to configure devtmpfs type %d\n", err); + return err; + } + err = register_filesystem(&dev_fs_type); if (err) { pr_err("unable to register devtmpfs type %d\n", err); diff --git a/drivers/base/driver.c b/drivers/base/driver.c index b4eb5b89c4ee..8ab010ddf709 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override); * Iterate over the @drv's list of devices calling @fn for each one. */ int driver_for_each_device(struct device_driver *drv, struct device *start, - void *data, int (*fn)(struct device *, void *)) + void *data, device_iter_t fn) { struct klist_iter i; struct device *dev; @@ -160,9 +160,12 @@ struct device *driver_find_device(const struct device_driver *drv, klist_iter_init_node(&drv->p->klist_devices, &i, (start ? &start->p->knode_driver : NULL)); - while ((dev = next_device(&i))) - if (match(dev, data) && get_device(dev)) + while ((dev = next_device(&i))) { + if (match(dev, data)) { + get_device(dev); break; + } + } klist_iter_exit(&i); return dev; } diff --git a/drivers/base/faux.c b/drivers/base/faux.c new file mode 100644 index 000000000000..407c1d1aad50 --- /dev/null +++ b/drivers/base/faux.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org> + * Copyright (c) 2025 The Linux Foundation + * + * A "simple" faux bus that allows devices to be created and added + * automatically to it. This is to be used whenever you need to create a + * device that is not associated with any "real" system resources, and do + * not want to have to deal with bus/driver binding logic. It is + * intended to be very simple, with only a create and a destroy function + * available. + */ +#include <linux/err.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/container_of.h> +#include <linux/device/faux.h> +#include "base.h" + +/* + * Internal wrapper structure so we can hold a pointer to the + * faux_device_ops for this device.
+ */ +struct faux_object { + struct faux_device faux_dev; + const struct faux_device_ops *faux_ops; +}; +#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev) + +static struct device faux_bus_root = { + .init_name = "faux", +}; + +static int faux_match(struct device *dev, const struct device_driver *drv) +{ + /* Match always succeeds, we only have one driver */ + return 1; +} + +static int faux_probe(struct device *dev) +{ + struct faux_object *faux_obj = to_faux_object(dev); + struct faux_device *faux_dev = &faux_obj->faux_dev; + const struct faux_device_ops *faux_ops = faux_obj->faux_ops; + int ret = 0; + + if (faux_ops && faux_ops->probe) + ret = faux_ops->probe(faux_dev); + + return ret; +} + +static void faux_remove(struct device *dev) +{ + struct faux_object *faux_obj = to_faux_object(dev); + struct faux_device *faux_dev = &faux_obj->faux_dev; + const struct faux_device_ops *faux_ops = faux_obj->faux_ops; + + if (faux_ops && faux_ops->remove) + faux_ops->remove(faux_dev); +} + +static const struct bus_type faux_bus_type = { + .name = "faux", + .match = faux_match, + .probe = faux_probe, + .remove = faux_remove, +}; + +static struct device_driver faux_driver = { + .name = "faux_driver", + .bus = &faux_bus_type, + .probe_type = PROBE_FORCE_SYNCHRONOUS, +}; + +static void faux_device_release(struct device *dev) +{ + struct faux_object *faux_obj = to_faux_object(dev); + + kfree(faux_obj); +} + +/** + * faux_device_create_with_groups - Create and register with the driver + * core a faux device and populate the device with an initial + * set of sysfs attributes. + * @name: The name of the device we are adding, must be unique for + * all faux devices. + * @parent: Pointer to a potential parent struct device. If set to + * NULL, the device will be created in the "root" of the faux + * device tree in sysfs. + * @faux_ops: struct faux_device_ops that the new device will call back + * into, can be NULL. + * @groups: The set of sysfs attributes that will be created for this + * device when it is registered with the driver core. + * + * Create a new faux device and register it in the driver core properly. + * If present, callbacks in @faux_ops will be called with the device for + * the caller to do something with at the proper time in the device's + * lifecycle. + * + * Note, when this function is called, the callbacks specified in + * @faux_ops can be called before the function returns, so be prepared for + * everything to be properly initialized before that point in time. If the + * probe callback (if one is present) does NOT succeed, the creation of the + * device will fail and NULL will be returned.
+ * + * Return: + * * NULL if an error happened with creating the device + * * pointer to a valid struct faux_device that is registered with sysfs + */ +struct faux_device *faux_device_create_with_groups(const char *name, + struct device *parent, + const struct faux_device_ops *faux_ops, + const struct attribute_group **groups) +{ + struct faux_object *faux_obj; + struct faux_device *faux_dev; + struct device *dev; + int ret; + + faux_obj = kzalloc(sizeof(*faux_obj), GFP_KERNEL); + if (!faux_obj) + return NULL; + + /* Save off the callbacks so we can use them in the future */ + faux_obj->faux_ops = faux_ops; + + /* Initialize the device portion and register it with the driver core */ + faux_dev = &faux_obj->faux_dev; + dev = &faux_dev->dev; + + device_initialize(dev); + dev->release = faux_device_release; + if (parent) + dev->parent = parent; + else + dev->parent = &faux_bus_root; + dev->bus = &faux_bus_type; + dev->groups = groups; + dev_set_name(dev, "%s", name); + + ret = device_add(dev); + if (ret) { + pr_err("%s: device_add for faux device '%s' failed with %d\n", + __func__, name, ret); + put_device(dev); + return NULL; + } + + /* + * Verify that we did bind the driver to the device (i.e. probe worked); + * if not, fail the creation, as it is almost impossible for the caller + * to determine whether probe was successful. + */ + if (!dev->driver) { + dev_err(dev, "probe did not succeed, tearing down the device\n"); + faux_device_destroy(faux_dev); + faux_dev = NULL; + } + + return faux_dev; +} +EXPORT_SYMBOL_GPL(faux_device_create_with_groups); + +/** + * faux_device_create - create and register with the driver core a faux device + * @name: The name of the device we are adding, must be unique for all + * faux devices. + * @parent: Pointer to a potential parent struct device. If set to + * NULL, the device will be created in the "root" of the faux + * device tree in sysfs. + * @faux_ops: struct faux_device_ops that the new device will call back + * into, can be NULL. + * + * Create a new faux device and register it in the driver core properly. + * If present, callbacks in @faux_ops will be called with the device for + * the caller to do something with at the proper time in the device's + * lifecycle. + * + * Note, when this function is called, the callbacks specified in + * @faux_ops can be called before the function returns, so be prepared for + * everything to be properly initialized before that point in time. + * + * Return: + * * NULL if an error happened with creating the device + * * pointer to a valid struct faux_device that is registered with sysfs + */ +struct faux_device *faux_device_create(const char *name, + struct device *parent, + const struct faux_device_ops *faux_ops) +{ + return faux_device_create_with_groups(name, parent, faux_ops, NULL); } +EXPORT_SYMBOL_GPL(faux_device_create); + +/** + * faux_device_destroy - destroy a faux device + * @faux_dev: faux device to destroy + * + * Unregisters and cleans up a device that was created with a call to + * faux_device_create(). + */ +void faux_device_destroy(struct faux_device *faux_dev) +{ + struct device *dev; + + if (!faux_dev) + return; + + dev = &faux_dev->dev; + device_del(dev); + + /* The final put_device() will clean up the memory we allocated for this device.
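A minimal usage sketch of the faux API as defined here; the my_* names are hypothetical:

	#include <linux/device/faux.h>
	#include <linux/module.h>

	static int my_faux_probe(struct faux_device *fdev)
	{
		/* a nonzero return makes faux_device_create() fail and return NULL */
		return 0;
	}

	static const struct faux_device_ops my_faux_ops = {
		.probe = my_faux_probe,
	};

	static struct faux_device *my_fdev;

	static int __init my_faux_init(void)
	{
		my_fdev = faux_device_create("my_faux", NULL, &my_faux_ops);
		return my_fdev ? 0 : -ENODEV;
	}
	module_init(my_faux_init);

	static void __exit my_faux_exit(void)
	{
		faux_device_destroy(my_fdev);
	}
	module_exit(my_faux_exit);

	MODULE_LICENSE("GPL");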
*/ + put_device(dev); +} +EXPORT_SYMBOL_GPL(faux_device_destroy); + +int __init faux_bus_init(void) +{ + int ret; + + ret = device_register(&faux_bus_root); + if (ret) { + put_device(&faux_bus_root); + return ret; + } + + ret = bus_register(&faux_bus_type); + if (ret) + goto error_bus; + + ret = driver_register(&faux_driver); + if (ret) + goto error_driver; + + return ret; + +error_driver: + bus_unregister(&faux_bus_type); + +error_bus: + device_unregister(&faux_bus_root); + return ret; +} diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c index ddb70e29eb42..c8afc501a8a4 100644 --- a/drivers/base/firmware_loader/fallback_table.c +++ b/drivers/base/firmware_loader/fallback_table.c @@ -25,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = { EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE"); #ifdef CONFIG_SYSCTL -static struct ctl_table firmware_config_table[] = { +static const struct ctl_table firmware_config_table[] = { { .procname = "force_sysfs_fallback", .data = &fw_fallback_config.force_sysfs_fallback, diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c index c9c93b47d9a5..d254ceb56d84 100644 --- a/drivers/base/firmware_loader/sysfs.c +++ b/drivers/base/firmware_loader/sysfs.c @@ -259,7 +259,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer, } static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -316,7 +316,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size) * the driver as a firmware image. **/ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -356,11 +356,11 @@ out: return retval; } -static struct bin_attribute firmware_attr_data = { +static const struct bin_attribute firmware_attr_data = { .attr = { .name = "data", .mode = 0644 }, .size = 0, - .read = firmware_data_read, - .write = firmware_data_write, + .read_new = firmware_data_read, + .write_new = firmware_data_write, }; static struct attribute *fw_dev_attrs[] = { @@ -374,14 +374,14 @@ static struct attribute *fw_dev_attrs[] = { NULL }; -static struct bin_attribute *fw_dev_bin_attrs[] = { +static const struct bin_attribute *const fw_dev_bin_attrs[] = { &firmware_attr_data, NULL }; static const struct attribute_group fw_dev_attr_group = { .attrs = fw_dev_attrs, - .bin_attrs = fw_dev_bin_attrs, + .bin_attrs_new = fw_dev_bin_attrs, #ifdef CONFIG_FW_UPLOAD .is_visible = fw_upload_is_visible, #endif diff --git a/drivers/base/init.c b/drivers/base/init.c index c4954835128c..9d2b06d65dfc 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -32,6 +32,7 @@ void __init driver_init(void) /* These are also core pieces, but must come after the * core core pieces. */ + faux_bus_init(); of_core_init(); platform_bus_init(); auxiliary_bus_init(); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 67858eeb92ed..19469e7f88c2 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -455,7 +455,7 @@ static ssize_t valid_zones_show(struct device *dev, struct memory_group *group = mem->group; struct zone *default_zone; int nid = mem->nid; - int len = 0; + int len; /* * Check the existing zone. 
Make sure that we do that only on the @@ -466,22 +466,18 @@ static ssize_t valid_zones_show(struct device *dev, * If !mem->zone, the memory block spans multiple zones and * cannot get offlined. */ - default_zone = mem->zone; - if (!default_zone) - return sysfs_emit(buf, "%s\n", "none"); - len += sysfs_emit_at(buf, len, "%s", default_zone->name); - goto out; + return sysfs_emit(buf, "%s\n", + mem->zone ? mem->zone->name : "none"); } default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group, start_pfn, nr_pages); - len += sysfs_emit_at(buf, len, "%s", default_zone->name); + len = sysfs_emit(buf, "%s", default_zone->name); len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, default_zone); len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE, default_zone); -out: len += sysfs_emit_at(buf, len, "\n"); return len; } @@ -512,7 +508,7 @@ static ssize_t auto_online_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", - online_type_to_str[mhp_default_online_type]); + online_type_to_str[mhp_get_default_online_type()]); } static ssize_t auto_online_blocks_store(struct device *dev, @@ -524,7 +520,7 @@ static ssize_t auto_online_blocks_store(struct device *dev, if (online_type < 0) return -EINVAL; - mhp_default_online_type = online_type; + mhp_set_default_online_type(online_type); return count; } @@ -820,22 +816,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state, return 0; } -static int __init add_boot_memory_block(unsigned long base_section_nr) -{ - int section_count = 0; - unsigned long nr; - - for (nr = base_section_nr; nr < base_section_nr + sections_per_block; - nr++) - if (present_section_nr(nr)) - section_count++; - - if (section_count == 0) - return 0; - return add_memory_block(memory_block_id(base_section_nr), - MEM_ONLINE, NULL, NULL); -} - static int add_hotplug_memory_block(unsigned long block_id, struct vmem_altmap *altmap, struct memory_group *group) @@ -962,7 +942,7 @@ static const struct attribute_group *memory_root_attr_groups[] = { void __init memory_dev_init(void) { int ret; - unsigned long block_sz, nr; + unsigned long block_sz, block_id, nr; /* Validate the configured memory block size */ block_sz = memory_block_size_bytes(); @@ -975,15 +955,23 @@ void __init memory_dev_init(void) panic("%s() failed to register subsystem: %d\n", __func__, ret); /* - * Create entries for memory sections that were found - * during boot and have been initialized + * Create entries for memory sections that were found during boot + * and have been initialized. Use @block_id to track the last + * handled block and initialize it to an invalid value (ULONG_MAX) + * to bypass the block ID matching check for the first present + * block so that it can be covered. 
*/ - for (nr = 0; nr <= __highest_present_section_nr; - nr += sections_per_block) { - ret = add_boot_memory_block(nr); - if (ret) - panic("%s() failed to add memory block: %d\n", __func__, - ret); + block_id = ULONG_MAX; + for_each_present_section_nr(0, nr) { + if (block_id != ULONG_MAX && memory_block_id(nr) == block_id) + continue; + + block_id = memory_block_id(nr); + ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL); + if (ret) { + panic("%s() failed to add memory block: %d\n", + __func__, ret); + } } } diff --git a/drivers/base/module.c b/drivers/base/module.c index 5bc71bea883a..218aaa096455 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv) if (mod) mk = &mod->mkobj; else if (drv->mod_name) { - struct kobject *mkobj; - - /* Lookup built-in module entry in /sys/modules */ - mkobj = kset_find_obj(module_kset, drv->mod_name); - if (mkobj) { - mk = container_of(mkobj, struct module_kobject, kobj); + /* Lookup or create built-in module entry in /sys/modules */ + mk = lookup_or_create_module_kobject(drv->mod_name); + if (mk) { /* remember our module structure */ drv->p->mkobj = mk; - /* kset_find_obj took a reference */ - kobject_put(mkobj); + /* lookup_or_create_module_kobject took a reference */ + kobject_put(&mk->kobj); } } diff --git a/drivers/base/node.c b/drivers/base/node.c index 0ea653fa3433..cd13ef287011 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -244,12 +244,14 @@ CACHE_ATTR(size, "%llu") CACHE_ATTR(line_size, "%u") CACHE_ATTR(indexing, "%u") CACHE_ATTR(write_policy, "%u") +CACHE_ATTR(address_mode, "%#x") static struct attribute *cache_attrs[] = { &dev_attr_indexing.attr, &dev_attr_size.attr, &dev_attr_line_size.attr, &dev_attr_write_policy.attr, + &dev_attr_address_mode.attr, NULL, }; ATTRIBUTE_GROUPS(cache); diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c index 951819e71b4a..a5539e294d4d 100644 --- a/drivers/base/physical_location.c +++ b/drivers/base/physical_location.c @@ -7,19 +7,18 @@ #include <linux/acpi.h> #include <linux/sysfs.h> +#include <linux/string_choices.h> #include "physical_location.h" bool dev_add_physical_location(struct device *dev) { struct acpi_pld_info *pld; - acpi_status status; if (!has_acpi_companion(dev)) return false; - status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld); - if (ACPI_FAILURE(status)) + if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld)) return false; dev->physical_location = @@ -118,7 +117,7 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", - dev->physical_location->dock ? "yes" : "no"); + str_yes_no(dev->physical_location->dock)); } static DEVICE_ATTR_RO(dock); @@ -126,7 +125,7 @@ static ssize_t lid_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", - dev->physical_location->lid ? 
"yes" : "no"); + str_yes_no(dev->physical_location->lid)); } static DEVICE_ATTR_RO(lid); diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 0e60dd650b5e..70db08f3ac6f 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs); void platform_device_msi_free_irqs_all(struct device *dev) { msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); + msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN); } EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 6f2a33722c52..cfccf3ff36e7 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev) static int platform_dma_configure(struct device *dev) { - struct platform_driver *drv = to_platform_driver(dev->driver); + struct device_driver *drv = READ_ONCE(dev->driver); struct fwnode_handle *fwnode = dev_fwnode(dev); enum dev_dma_attr attr; int ret = 0; @@ -1451,7 +1451,8 @@ static int platform_dma_configure(struct device *dev) attr = acpi_get_dma_attr(to_acpi_device_node(fwnode)); ret = acpi_dma_configure(dev, attr); } - if (ret || drv->driver_managed_dma) + /* @dev->driver may not be valid when we're called from the IOMMU layer */ + if (ret || !drv || to_platform_driver(drv)->driver_managed_dma) return ret; ret = iommu_device_use_default_domain(dev); diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index e18ba676cdf6..b69bcb37c830 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -259,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk) } EXPORT_SYMBOL_GPL(pm_clk_add_clk); - -/** - * of_pm_clk_add_clk - Start using a device clock for power management. - * @dev: Device whose clock is going to be used for power management. - * @name: Name of clock that is going to be used for power management. - * - * Add the clock described in the 'clocks' device-tree node that matches - * with the 'name' provided, to the list of clocks used for the power - * management of @dev. On success, returns 0. Returns a negative error - * code if the clock is not found or cannot be added. - */ -int of_pm_clk_add_clk(struct device *dev, const char *name) -{ - struct clk *clk; - int ret; - - if (!dev || !dev->of_node || !name) - return -EINVAL; - - clk = of_clk_get_by_name(dev->of_node, name); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - ret = pm_clk_add_clk(dev, clk); - if (ret) { - clk_put(clk); - return ret; - } - - return 0; -} -EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); - /** * of_pm_clk_add_clks - Start using device clock(s) for power management. * @dev: Device whose clock(s) is going to be used for power management. @@ -377,46 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) } /** - * pm_clk_remove - Stop using a device clock for power management. - * @dev: Device whose clock should not be used for PM any more. - * @con_id: Connection ID of the clock. - * - * Remove the clock represented by @con_id from the list of clocks used for - * the power management of @dev. 
- */ -void pm_clk_remove(struct device *dev, const char *con_id) -{ - struct pm_subsys_data *psd = dev_to_psd(dev); - struct pm_clock_entry *ce; - - if (!psd) - return; - - pm_clk_list_lock(psd); - - list_for_each_entry(ce, &psd->clock_list, node) { - if (!con_id && !ce->con_id) - goto remove; - else if (!con_id || !ce->con_id) - continue; - else if (!strcmp(con_id, ce->con_id)) - goto remove; - } - - pm_clk_list_unlock(psd); - return; - - remove: - list_del(&ce->node); - if (ce->enabled_when_prepared) - psd->clock_op_might_sleep--; - pm_clk_list_unlock(psd); - - __pm_clk_remove(ce); -} -EXPORT_SYMBOL_GPL(pm_clk_remove); - -/** * pm_clk_remove_clk - Stop using a device clock for power management. * @dev: Device whose clock should not be used for PM any more. * @clk: Clock pointer diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 4fa525668cb7..6502720bb564 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -115,18 +115,6 @@ int pm_generic_freeze_noirq(struct device *dev) EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); /** - * pm_generic_freeze_late - Generic freeze_late callback for subsystems. - * @dev: Device to freeze. - */ -int pm_generic_freeze_late(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - return pm && pm->freeze_late ? pm->freeze_late(dev) : 0; -} -EXPORT_SYMBOL_GPL(pm_generic_freeze_late); - -/** * pm_generic_freeze - Generic freeze callback for subsystems. * @dev: Device to freeze. */ @@ -187,18 +175,6 @@ int pm_generic_thaw_noirq(struct device *dev) EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); /** - * pm_generic_thaw_early - Generic thaw_early callback for subsystems. - * @dev: Device to thaw. - */ -int pm_generic_thaw_early(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - return pm && pm->thaw_early ? pm->thaw_early(dev) : 0; -} -EXPORT_SYMBOL_GPL(pm_generic_thaw_early); - -/** * pm_generic_thaw - Generic thaw callback for subsystems. * @dev: Device to thaw. 
*/ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 4a67e83300e1..1926454c7a7e 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -249,7 +249,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr) static void dpm_wait_for_children(struct device *dev, bool async) { - device_for_each_child(dev, &async, dpm_wait_fn); + device_for_each_child(dev, &async, dpm_wait_fn); } static void dpm_wait_for_suppliers(struct device *dev, bool async) @@ -496,6 +496,7 @@ struct dpm_watchdog { struct device *dev; struct task_struct *tsk; struct timer_list timer; + bool fatal; }; #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ @@ -512,11 +513,23 @@ struct dpm_watchdog { static void dpm_watchdog_handler(struct timer_list *t) { struct dpm_watchdog *wd = from_timer(wd, t, timer); + struct timer_list *timer = &wd->timer; + unsigned int time_left; + + if (wd->fatal) { + dev_emerg(wd->dev, "**** DPM device timeout ****\n"); + show_stack(wd->tsk, NULL, KERN_EMERG); + panic("%s %s: unrecoverable failure\n", + dev_driver_string(wd->dev), dev_name(wd->dev)); + } + + time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; + dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n", + CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left); + show_stack(wd->tsk, NULL, KERN_WARNING); - dev_emerg(wd->dev, "**** DPM device timeout ****\n"); - show_stack(wd->tsk, NULL, KERN_EMERG); - panic("%s %s: unrecoverable failure\n", - dev_driver_string(wd->dev), dev_name(wd->dev)); + wd->fatal = true; + mod_timer(timer, jiffies + HZ * time_left); } /** @@ -530,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) wd->dev = dev; wd->tsk = current; + wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; timer_setup_on_stack(timer, dpm_watchdog_handler, 0); /* use same timeout value for both suspend and resume */ - timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; + timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT; add_timer(timer); } @@ -545,7 +559,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd) { struct timer_list *timer = &wd->timer; - del_timer_sync(timer); + timer_delete_sync(timer); destroy_timer_on_stack(timer); } #else @@ -585,27 +599,34 @@ static bool is_async(struct device *dev) static bool dpm_async_fn(struct device *dev, async_func_t func) { - reinit_completion(&dev->power.completion); + if (!is_async(dev)) + return false; - if (is_async(dev)) { - dev->power.async_in_progress = true; + dev->power.work_in_progress = true; - get_device(dev); + get_device(dev); + + if (async_schedule_dev_nocall(func, dev)) + return true; - if (async_schedule_dev_nocall(func, dev)) - return true; + put_device(dev); - put_device(dev); - } /* - * Because async_schedule_dev_nocall() above has returned false or it - * has not been called at all, func() is not running and it is safe to - * update the async_in_progress flag without extra synchronization. + * async_schedule_dev_nocall() above has returned false, so func() is + * not running and it is safe to update power.work_in_progress without + * extra synchronization. */ - dev->power.async_in_progress = false; + dev->power.work_in_progress = false; + return false; } +static void dpm_clear_async_state(struct device *dev) +{ + reinit_completion(&dev->power.completion); + dev->power.work_in_progress = false; +} + /** * device_resume_noirq - Execute a "noirq resume" callback for given device. 
* @dev: Device to handle. @@ -642,12 +663,12 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy * so change its status accordingly. * * Otherwise, the device is going to be resumed, so set its PM-runtime - * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set - * to avoid confusing drivers that don't use it. + * status to "active" unless its power.smart_suspend flag is clear, in + * which case it is not necessary to update its PM-runtime status. */ if (skip_resume) pm_runtime_set_suspended(dev); - else if (dev_pm_skip_suspend(dev)) + else if (dev_pm_smart_suspend(dev)) pm_runtime_set_active(dev); if (dev->pm_domain) { @@ -715,14 +736,16 @@ static void dpm_noirq_resume_devices(pm_message_t state) * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. */ - list_for_each_entry(dev, &dpm_noirq_list, power.entry) + list_for_each_entry(dev, &dpm_noirq_list, power.entry) { + dpm_clear_async_state(dev); dpm_async_fn(dev, async_resume_noirq); + } while (!list_empty(&dpm_noirq_list)) { dev = to_device(dpm_noirq_list.next); list_move_tail(&dev->power.entry, &dpm_late_early_list); - if (!dev->power.async_in_progress) { + if (!dev->power.work_in_progress) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -855,14 +878,16 @@ void dpm_resume_early(pm_message_t state) * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. */ - list_for_each_entry(dev, &dpm_late_early_list, power.entry) + list_for_each_entry(dev, &dpm_late_early_list, power.entry) { + dpm_clear_async_state(dev); dpm_async_fn(dev, async_resume_early); + } while (!list_empty(&dpm_late_early_list)) { dev = to_device(dpm_late_early_list.next); list_move_tail(&dev->power.entry, &dpm_suspended_list); - if (!dev->power.async_in_progress) { + if (!dev->power.work_in_progress) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -913,8 +938,20 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (!dev->power.is_suspended) + goto Complete; + + dev->power.is_suspended = false; + if (dev->power.direct_complete) { - /* Match the pm_runtime_disable() in __device_suspend(). */ + /* + * Allow new children to be added under the device after this + * point if it has no PM callbacks. + */ + if (dev->power.no_pm_callbacks) + dev->power.is_prepared = false; + + /* Match the pm_runtime_disable() in device_suspend(). */ pm_runtime_enable(dev); goto Complete; } @@ -931,9 +968,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) */ dev->power.is_prepared = false; - if (!dev->power.is_suspended) - goto Unlock; - if (dev->pm_domain) { info = "power domain "; callback = pm_op(&dev->pm_domain->ops, state); @@ -971,9 +1005,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) End: error = dpm_run_callback(callback, dev, state, info); - dev->power.is_suspended = false; - Unlock: device_unlock(dev); dpm_watchdog_clear(&wd); @@ -1021,14 +1053,16 @@ void dpm_resume(pm_message_t state) * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. 
*/ - list_for_each_entry(dev, &dpm_suspended_list, power.entry) + list_for_each_entry(dev, &dpm_suspended_list, power.entry) { + dpm_clear_async_state(dev); dpm_async_fn(dev, async_resume); + } while (!list_empty(&dpm_suspended_list)) { dev = to_device(dpm_suspended_list.next); list_move_tail(&dev->power.entry, &dpm_prepared_list); - if (!dev->power.async_in_progress) { + if (!dev->power.work_in_progress) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -1093,6 +1127,8 @@ static void device_complete(struct device *dev, pm_message_t state) device_unlock(dev); out: + /* If enabling runtime PM for the device is blocked, unblock it. */ + pm_runtime_unblock(dev); pm_runtime_put(dev); } @@ -1254,14 +1290,13 @@ Skip: dev->power.is_noirq_suspended = true; /* - * Skipping the resume of devices that were in use right before the - * system suspend (as indicated by their PM-runtime usage counters) - * would be suboptimal. Also resume them if doing that is not allowed - * to be skipped. + * Devices must be resumed unless they are explicitly allowed to be left + * in suspend, but even in that case skipping the resume of devices that + * were in use right before the system suspend (as indicated by their + * runtime PM usage counters and child counters) would be suboptimal. */ - if (atomic_read(&dev->power.usage_count) > 1 || - !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && - dev->power.may_skip_resume)) + if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && + dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) dev->power.must_resume = true; if (dev->power.must_resume) @@ -1298,6 +1333,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state) list_move(&dev->power.entry, &dpm_noirq_list); + dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend_noirq)) continue; @@ -1382,6 +1418,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn TRACE_DEVICE(dev); TRACE_SUSPEND(0); + /* + * Disable runtime PM for the device without checking if there is a + * pending resume request for it. + */ __pm_runtime_disable(dev, false); dpm_wait_for_subordinate(dev, async); @@ -1471,6 +1511,7 @@ int dpm_suspend_late(pm_message_t state) list_move(&dev->power.entry, &dpm_late_early_list); + dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend_late)) continue; @@ -1628,6 +1669,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) { pm_dev_dbg(dev, state, "direct-complete "); + dev->power.is_suspended = true; goto Complete; } @@ -1738,6 +1780,7 @@ int dpm_suspend(pm_message_t state) list_move(&dev->power.entry, &dpm_suspended_list); + dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend)) continue; @@ -1769,6 +1812,46 @@ int dpm_suspend(pm_message_t state) return error; } +static bool device_prepare_smart_suspend(struct device *dev) +{ + struct device_link *link; + bool ret = true; + int idx; + + /* + * The "smart suspend" feature is enabled for devices whose drivers ask + * for it and for devices without PM callbacks. + * + * However, if "smart suspend" is not enabled for the device's parent + * or any of its suppliers that take runtime PM into account, it cannot + * be enabled for the device either. 
+ */ + if (!dev->power.no_pm_callbacks && + !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) + return false; + + if (dev->parent && !dev_pm_smart_suspend(dev->parent) && + !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent)) + return false; + + idx = device_links_read_lock(); + + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { + if (!(link->flags & DL_FLAG_PM_RUNTIME)) + continue; + + if (!dev_pm_smart_suspend(link->supplier) && + !pm_runtime_blocked(link->supplier)) { + ret = false; + break; + } + } + + device_links_read_unlock(idx); + + return ret; +} + /** * device_prepare - Prepare a device for system power transition. * @dev: Device to handle. @@ -1780,6 +1863,7 @@ int dpm_suspend(pm_message_t state) static int device_prepare(struct device *dev, pm_message_t state) { int (*callback)(struct device *) = NULL; + bool smart_suspend; int ret = 0; /* @@ -1789,6 +1873,13 @@ static int device_prepare(struct device *dev, pm_message_t state) * it again during the complete phase. */ pm_runtime_get_noresume(dev); + /* + * If runtime PM is disabled for the device at this point and it has + * never been enabled so far, it should not be enabled until this system + * suspend-resume cycle is complete, so prepare to trigger a warning on + * subsequent attempts to enable it. + */ + smart_suspend = !pm_runtime_block_if_disabled(dev); if (dev->power.syscore) return 0; @@ -1823,6 +1914,13 @@ unlock: pm_runtime_put(dev); return ret; } + /* Do not enable "smart suspend" for devices with disabled runtime PM. */ + if (smart_suspend) + smart_suspend = device_prepare_smart_suspend(dev); + + spin_lock_irq(&dev->power.lock); + + dev->power.smart_suspend = smart_suspend; /* * A positive return value from ->prepare() means "this device appears * to be runtime-suspended and its state is fine, so if it really is @@ -1830,11 +1928,12 @@ unlock: * will do the same thing with all of its descendants". This only * applies to suspend transitions, however. */ - spin_lock_irq(&dev->power.lock); dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && (ret > 0 || dev->power.no_pm_callbacks) && !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); + spin_unlock_irq(&dev->power.lock); + return 0; } @@ -1998,6 +2097,5 @@ void device_pm_check_callbacks(struct device *dev) bool dev_pm_skip_suspend(struct device *dev) { - return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) && - pm_runtime_status_suspended(dev); + return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev); } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 2ee45841486b..c55a7c70bc1a 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -448,8 +448,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) retval = __rpm_callback(cb, dev); } - dev->power.runtime_error = retval; - return retval != -EACCES ? retval : -EIO; + /* + * Since -EACCES means that runtime PM is disabled for the given device, + * it should not be returned by runtime PM callbacks. If it is returned + * nevertheless, assume it to be a transient error and convert it to + * -EAGAIN. 
+ */ + if (retval == -EACCES) + retval = -EAGAIN; + + if (retval != -EAGAIN && retval != -EBUSY) + dev->power.runtime_error = retval; + + return retval; } /** @@ -725,21 +736,18 @@ static int rpm_suspend(struct device *dev, int rpmflags) dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); - if (retval == -EAGAIN || retval == -EBUSY) { - dev->power.runtime_error = 0; + /* + * On transient errors, if the callback routine failed an autosuspend, + * and if the last_busy time has been updated so that there is a new + * autosuspend expiration time, automatically reschedule another + * autosuspend. + */ + if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) && + pm_runtime_autosuspend_expiration(dev) != 0) + goto repeat; + + pm_runtime_cancel_pending(dev); - /* - * If the callback routine failed an autosuspend, and - * if the last_busy time has been updated so that there - * is a new autosuspend expiration time, automatically - * reschedule another autosuspend. - */ - if ((rpmflags & RPM_AUTO) && - pm_runtime_autosuspend_expiration(dev) != 0) - goto repeat; - } else { - pm_runtime_cancel_pending(dev); - } goto out; } @@ -1003,7 +1011,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) * If 'expires' is after the current time, we've been called * too early. */ - if (expires > 0 && expires < ktime_get_mono_fast_ns()) { + if (expires > 0 && expires <= ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); @@ -1460,20 +1468,31 @@ int pm_runtime_barrier(struct device *dev) } EXPORT_SYMBOL_GPL(pm_runtime_barrier); -/** - * __pm_runtime_disable - Disable runtime PM of a device. - * @dev: Device to handle. - * @check_resume: If set, check if there's a resume request for the device. - * - * Increment power.disable_depth for the device and if it was zero previously, - * cancel all pending runtime PM requests for the device and wait for all - * operations in progress to complete. The device can be either active or - * suspended after its runtime PM has been disabled. - * - * If @check_resume is set and there's a resume request pending when - * __pm_runtime_disable() is called and power.disable_depth is zero, the - * function will wake up the device before disabling its runtime PM. 
- */ +bool pm_runtime_block_if_disabled(struct device *dev) +{ + bool ret; + + spin_lock_irq(&dev->power.lock); + + ret = !pm_runtime_enabled(dev); + if (ret && dev->power.last_status == RPM_INVALID) + dev->power.last_status = RPM_BLOCKED; + + spin_unlock_irq(&dev->power.lock); + + return ret; +} + +void pm_runtime_unblock(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + + if (dev->power.last_status == RPM_BLOCKED) + dev->power.last_status = RPM_INVALID; + + spin_unlock_irq(&dev->power.lock); +} + void __pm_runtime_disable(struct device *dev, bool check_resume) { spin_lock_irq(&dev->power.lock); @@ -1532,6 +1551,10 @@ void pm_runtime_enable(struct device *dev) if (--dev->power.disable_depth > 0) goto out; + if (dev->power.last_status == RPM_BLOCKED) { + dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n"); + dump_stack(); + } dev->power.last_status = RPM_INVALID; dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); @@ -1545,6 +1568,32 @@ out: } EXPORT_SYMBOL_GPL(pm_runtime_enable); +static void pm_runtime_set_suspended_action(void *data) +{ + pm_runtime_set_suspended(data); +} + +/** + * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable. + * + * @dev: Device to handle. + */ +int devm_pm_runtime_set_active_enabled(struct device *dev) +{ + int err; + + err = pm_runtime_set_active(dev); + if (err) + return err; + + err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev); + if (err) + return err; + + return devm_pm_runtime_enable(dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled); + static void pm_runtime_disable_action(void *data) { pm_runtime_dont_use_autosuspend(data); @@ -1567,6 +1616,24 @@ int devm_pm_runtime_enable(struct device *dev) } EXPORT_SYMBOL_GPL(devm_pm_runtime_enable); +static void pm_runtime_put_noidle_action(void *data) +{ + pm_runtime_put_noidle(data); +} + +/** + * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume. + * + * @dev: Device to handle. + */ +int devm_pm_runtime_get_noresume(struct device *dev) +{ + pm_runtime_get_noresume(dev); + + return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume); + /** * pm_runtime_forbid - Block runtime PM of a device. * @dev: Device to handle. 
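For context on how the new devres runtime-PM helpers above are meant to be used, here is a minimal probe sketch. It is an illustrative assumption, not part of the patch: the foo_probe() name and the pairing of the two calls are hypothetical; only devm_pm_runtime_set_active_enabled() and devm_pm_runtime_get_noresume() come from this change.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/*
	 * Mark the device RPM-active and enable runtime PM; a devres
	 * action restores the suspended status on driver detach.
	 */
	ret = devm_pm_runtime_set_active_enabled(dev);
	if (ret)
		return ret;

	/*
	 * Hold a usage reference for the whole bind time; it is dropped
	 * automatically via pm_runtime_put_noidle() on detach.
	 */
	return devm_pm_runtime_get_noresume(dev);
}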
@@ -1764,8 +1831,8 @@ void pm_runtime_init(struct device *dev) INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; - hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - dev->power.suspend_timer.function = pm_suspend_timer_fn; + hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); init_waitqueue_head(&dev->power.wait_queue); } @@ -1874,7 +1941,7 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); } -static bool pm_runtime_need_not_resume(struct device *dev) +bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && (atomic_read(&dev->power.child_count) == 0 || @@ -1959,7 +2026,7 @@ int pm_runtime_force_resume(struct device *dev) int (*callback)(struct device *); int ret = 0; - if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) + if (!dev->power.needs_force_resume) goto out; /* diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index f8163b559bf9..f84018125b46 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -6,7 +6,6 @@ #include <linux/export.h> #include <linux/pm_qos.h> #include <linux/pm_runtime.h> -#include <linux/pm_wakeup.h> #include <linux/atomic.h> #include <linux/jiffies.h> #include "power.h" diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 5a5a9e978e85..8aa28c08b289 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq); +static void devm_pm_clear_wake_irq(void *dev) +{ + dev_pm_clear_wake_irq(dev); +} + +/** + * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq + * @dev: Device entry + * @irq: Device IO interrupt + * + * Attach a device IO interrupt as a wake IRQ, like dev_pm_set_wake_irq(), but + * also clear the wake capability automatically on driver detach. + */ +int devm_pm_set_wake_irq(struct device *dev, int irq) +{ + int ret; + + ret = dev_pm_set_wake_irq(dev, irq); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev); +} +EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq); + /** * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts * @irq: Device specific dedicated wake-up interrupt diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 752b417e8129..63bf914a4d44 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -197,7 +197,7 @@ void wakeup_source_remove(struct wakeup_source *ws) raw_spin_unlock_irqrestore(&events_lock, flags); synchronize_srcu(&wakeup_srcu); - del_timer_sync(&ws->timer); + timer_delete_sync(&ws->timer); /* * Clear timer.function to make wakeup_source_not_registered() treat * this wakeup source as not registered. 
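A short usage sketch for the device-managed wake IRQ helper added above. The driver name and the IRQ lookup are hypothetical assumptions; device_init_wakeup() is the usual companion call and is not part of this change.

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* Let the PM core treat the device as wakeup-capable. */
	device_init_wakeup(&pdev->dev, true);

	/*
	 * Same as dev_pm_set_wake_irq(), but dev_pm_clear_wake_irq() is
	 * called automatically when the driver unbinds.
	 */
	return devm_pm_set_wake_irq(&pdev->dev, irq);
}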
@@ -613,7 +613,7 @@ void __pm_stay_awake(struct wakeup_source *ws) spin_lock_irqsave(&ws->lock, flags); wakeup_source_report_event(ws, false); - del_timer(&ws->timer); + timer_delete(&ws->timer); ws->timer_expires = 0; spin_unlock_irqrestore(&ws->lock, flags); @@ -693,7 +693,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) ws->max_time = duration; ws->last_time = now; - del_timer(&ws->timer); + timer_delete(&ws->timer); ws->timer_expires = 0; if (ws->autosleep_enabled) diff --git a/drivers/base/property.c b/drivers/base/property.c index 837d77e3af2b..c1392743df9c 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -71,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode, EXPORT_SYMBOL_GPL(fwnode_property_present); /** + * device_property_read_bool - Return the value for a boolean property of a device + * @dev: Device whose property is being checked + * @propname: Name of the property + * + * Return whether property @propname is true or false in the device firmware description. + * + * Return: true if property @propname is present. Otherwise, returns false. + */ +bool device_property_read_bool(const struct device *dev, const char *propname) +{ + return fwnode_property_read_bool(dev_fwnode(dev), propname); +} +EXPORT_SYMBOL_GPL(device_property_read_bool); + +/** + * fwnode_property_read_bool - Return the value for a boolean property of a firmware node + * @fwnode: Firmware node whose property to check + * @propname: Name of the property + * + * Return whether property @propname is true or false in the firmware description. + */ +bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, + const char *propname) +{ + bool ret; + + if (IS_ERR_OR_NULL(fwnode)) + return false; + + ret = fwnode_call_bool_op(fwnode, property_read_bool, propname); + if (ret) + return ret; + + return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname); +} +EXPORT_SYMBOL_GPL(fwnode_property_read_bool); + +/** * device_property_read_u8_array - return a u8 array property of a device + * @dev: Device to get the property of + * @propname: Name of the property diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index bdb450436cbc..6f31240ee4a9 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -73,12 +73,12 @@ struct regmap { void *bus_context; const char *name; - bool async; spinlock_t async_lock; wait_queue_head_t async_waitq; struct list_head async_list; struct list_head async_free; int async_ret; + bool async; #ifdef CONFIG_DEBUG_FS bool debugfs_disable; @@ -117,8 +117,6 @@ struct regmap { void *val_buf, size_t val_size); int (*write)(void *context, const void *data, size_t count); - bool defer_caching; - unsigned long read_flag_mask; unsigned long write_flag_mask; @@ -127,6 +125,8 @@ struct regmap { int reg_stride; int reg_stride_order; + bool defer_caching; + /* If set, will always write field to HW. 
*/ bool force_write_field; @@ -161,6 +161,9 @@ struct regmap { struct reg_sequence *patch; int patch_regs; + /* if set, the regmap core can sleep */ + bool can_sleep; + /* if set, converts bulk read to single read */ bool use_single_read; /* if set, converts bulk write to single write */ @@ -176,9 +179,6 @@ struct regmap { void *selector_work_buf; /* Scratch buffer used for selector */ struct hwspinlock *hwlock; - - /* if set, the regmap core can sleep */ - bool can_sleep; }; struct regcache_ops { diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c index 23da7b31d715..2319c30283a6 100644 --- a/drivers/base/regmap/regcache-maple.c +++ b/drivers/base/regmap/regcache-maple.c @@ -73,8 +73,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg, rcu_read_unlock(); - entry = kmalloc((last - index + 1) * sizeof(unsigned long), - map->alloc_flags); + entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags); if (!entry) return -ENOMEM; @@ -204,7 +203,7 @@ static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry, * overheads. */ if (max - min > 1 && regmap_can_raw_write(map)) { - buf = kmalloc(val_bytes * (max - min), map->alloc_flags); + buf = kmalloc_array(max - min, val_bytes, map->alloc_flags); if (!buf) { ret = -ENOMEM; goto out; @@ -320,7 +319,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first, unsigned long *entry; int i, ret; - entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags); + entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags); if (!entry) return -ENOMEM; diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 188438186589..a9d17f316e55 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -275,18 +275,16 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, pos = (reg - base_reg) / map->reg_stride; offset = (rbnode->base_reg - base_reg) / map->reg_stride; - blk = krealloc(rbnode->block, - blklen * map->cache_word_size, - map->alloc_flags); + blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags); if (!blk) return -ENOMEM; rbnode->block = blk; if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { - present = krealloc(rbnode->cache_present, - BITS_TO_LONGS(blklen) * sizeof(*present), - map->alloc_flags); + present = krealloc_array(rbnode->cache_present, + BITS_TO_LONGS(blklen), sizeof(*present), + map->alloc_flags); if (!present) return -ENOMEM; diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index d3659ba3cc11..f7fcf2de1301 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -21,6 +21,37 @@ static const struct regcache_ops *cache_types[] = { ®cache_flat_ops, }; +static int regcache_defaults_cmp(const void *a, const void *b) +{ + const struct reg_default *x = a; + const struct reg_default *y = b; + + if (x->reg > y->reg) + return 1; + else if (x->reg < y->reg) + return -1; + else + return 0; +} + +static void regcache_defaults_swap(void *a, void *b, int size) +{ + struct reg_default *x = a; + struct reg_default *y = b; + struct reg_default tmp; + + tmp = *x; + *x = *y; + *y = tmp; +} + +void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults) +{ + sort(defaults, ndefaults, sizeof(*defaults), + regcache_defaults_cmp, regcache_defaults_swap); +} +EXPORT_SYMBOL_GPL(regcache_sort_defaults); + static int 
regcache_hw_init(struct regmap *map) { int i, j; @@ -154,7 +185,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) map->num_reg_defaults = config->num_reg_defaults; map->num_reg_defaults_raw = config->num_reg_defaults_raw; map->reg_defaults_raw = config->reg_defaults_raw; - map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); + map->cache_word_size = BITS_TO_BYTES(config->val_bits); map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; map->cache = NULL; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 978613407ea3..6c6869188c31 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -823,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, /* Ack masked but set interrupts */ if (d->chip->no_status) { /* no status register so default to all active */ - d->status_buf[i] = GENMASK(31, 0); + d->status_buf[i] = UINT_MAX; } else { reg = d->get_irq_reg(d, d->chip->status_base, i); ret = regmap_read(map, reg, &d->status_buf[i]); diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c index c99eada83780..86644bbd0710 100644 --- a/drivers/base/regmap/regmap-sdw-mbq.c +++ b/drivers/base/regmap/regmap-sdw-mbq.c @@ -1,45 +1,187 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright(c) 2020 Intel Corporation. +#include <linux/bits.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_registers.h> +#include <sound/sdca_function.h> #include "internal.h" +struct regmap_mbq_context { + struct device *dev; + + struct regmap_sdw_mbq_cfg cfg; + + int val_size; + bool (*readable_reg)(struct device *dev, unsigned int reg); +}; + +static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg) +{ + int size = ctx->val_size; + + if (ctx->cfg.mbq_size) { + size = ctx->cfg.mbq_size(ctx->dev, reg); + if (!size || size > ctx->val_size) + return -EINVAL; + } + + return size; +} + +static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg) +{ + if (ctx->cfg.deferrable) + return ctx->cfg.deferrable(ctx->dev, reg); + + return false; +} + +static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg, + struct regmap_mbq_context *ctx) +{ + struct device *dev = &slave->dev; + int val, ret = 0; + + dev_dbg(dev, "Deferring transaction for 0x%x\n", reg); + + reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0, + SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0); + + if (ctx->readable_reg(dev, reg)) { + ret = read_poll_timeout(sdw_read_no_pm, val, + val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY), + ctx->cfg.timeout_us, ctx->cfg.retry_us, + false, slave, reg); + if (val < 0) + return val; + if (ret) + dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val); + } else { + fsleep(ctx->cfg.timeout_us); + } + + return ret; +} + +static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave, + unsigned int reg, unsigned int val, + int mbq_size, bool deferrable) +{ + int shift = mbq_size * BITS_PER_BYTE; + int ret; + + while (--mbq_size > 0) { + shift -= BITS_PER_BYTE; + + ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), + (val >> shift) & 0xff); + if (ret < 0) + return ret; + } + + ret = sdw_write_no_pm(slave, reg, val & 0xff); + if (deferrable && ret == -ENODATA) + return -EAGAIN; + + return ret; +} + static int regmap_sdw_mbq_write(void 
*context, unsigned int reg, unsigned int val) { - struct device *dev = context; + struct regmap_mbq_context *ctx = context; + struct device *dev = ctx->dev; struct sdw_slave *slave = dev_to_sdw_dev(dev); + bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg); + int mbq_size = regmap_sdw_mbq_size(ctx, reg); int ret; - ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff); - if (ret < 0) - return ret; + if (mbq_size < 0) + return mbq_size; + + /* + * Technically the spec does allow a device to set itself to busy for + * internal reasons, but since it doesn't provide any information on + * how to handle timeouts in that case, for now the code will only + * process a single wait/timeout on function busy and a single retry + * of the transaction. + */ + ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable); + if (ret == -EAGAIN) { + ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx); + if (ret) + return ret; + + ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false); + } + + return ret; +} + +static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave, + unsigned int reg, unsigned int *val, + int mbq_size, bool deferrable) +{ + int shift = BITS_PER_BYTE; + int read; + + read = sdw_read_no_pm(slave, reg); + if (read < 0) { + if (deferrable && read == -ENODATA) + return -EAGAIN; + + return read; + } + + *val = read; + + while (--mbq_size > 0) { + read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg)); + if (read < 0) + return read; + + *val |= read << shift; + shift += BITS_PER_BYTE; + } - return sdw_write_no_pm(slave, reg, val & 0xff); + return 0; } static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val) { - struct device *dev = context; + struct regmap_mbq_context *ctx = context; + struct device *dev = ctx->dev; struct sdw_slave *slave = dev_to_sdw_dev(dev); - int read0; - int read1; + bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg); + int mbq_size = regmap_sdw_mbq_size(ctx, reg); + int ret; - read0 = sdw_read_no_pm(slave, reg); - if (read0 < 0) - return read0; + if (mbq_size < 0) + return mbq_size; - read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg)); - if (read1 < 0) - return read1; + /* + * Technically the spec does allow a device to set itself to busy for + * internal reasons, but since it doesn't provide any information on + * how to handle timeouts in that case, for now the code will only + * process a single wait/timeout on function busy and a single retry + * of the transaction. 
+ */ + ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable); + if (ret == -EAGAIN) { + ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx); + if (ret) + return ret; - *val = (read1 << 8) | read0; + ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false); + } - return 0; + return ret; } static const struct regmap_bus regmap_sdw_mbq = { @@ -51,8 +193,7 @@ static const struct regmap_bus regmap_sdw_mbq = { static int regmap_sdw_mbq_config_check(const struct regmap_config *config) { - /* MBQ-based controls are only 16-bits for now */ - if (config->val_bits != 16) + if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE)) return -ENOTSUPP; /* Registers are 32 bits wide */ @@ -65,35 +206,69 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config) return 0; } +static struct regmap_mbq_context * +regmap_sdw_mbq_gen_context(struct device *dev, + const struct regmap_config *config, + const struct regmap_sdw_mbq_cfg *mbq_config) +{ + struct regmap_mbq_context *ctx; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->dev = dev; + + if (mbq_config) + ctx->cfg = *mbq_config; + + ctx->val_size = config->val_bits / BITS_PER_BYTE; + ctx->readable_reg = config->readable_reg; + + return ctx; +} + struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw, const struct regmap_config *config, + const struct regmap_sdw_mbq_cfg *mbq_config, struct lock_class_key *lock_key, const char *lock_name) { + struct regmap_mbq_context *ctx; int ret; ret = regmap_sdw_mbq_config_check(config); if (ret) return ERR_PTR(ret); - return __regmap_init(&sdw->dev, ®map_sdw_mbq, - &sdw->dev, config, lock_key, lock_name); + ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config); + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __regmap_init(&sdw->dev, ®map_sdw_mbq, ctx, + config, lock_key, lock_name); } EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq); struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, const struct regmap_config *config, + const struct regmap_sdw_mbq_cfg *mbq_config, struct lock_class_key *lock_key, const char *lock_name) { + struct regmap_mbq_context *ctx; int ret; ret = regmap_sdw_mbq_config_check(config); if (ret) return ERR_PTR(ret); - return __devm_regmap_init(&sdw->dev, ®map_sdw_mbq, - &sdw->dev, config, lock_key, lock_name); + ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config); + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __devm_regmap_init(&sdw->dev, ®map_sdw_mbq, ctx, + config, lock_key, lock_name); } EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 5962ea1230a1..f2843f814675 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -769,14 +769,13 @@ struct regmap *__regmap_init(struct device *dev, map->alloc_flags = GFP_KERNEL; map->reg_base = config->reg_base; + map->reg_shift = config->pad_bits % 8; - map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); map->format.pad_bytes = config->pad_bits / 8; map->format.reg_shift = config->reg_shift; - map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); - map->format.buf_size = DIV_ROUND_UP(config->reg_bits + - config->val_bits + config->pad_bits, 8); - map->reg_shift = config->pad_bits % 8; + map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits); + map->format.val_bytes = BITS_TO_BYTES(config->val_bits); + map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits); if 
(config->reg_stride) map->reg_stride = config->reg_stride; else @@ -3116,7 +3115,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id, EXPORT_SYMBOL_GPL(regmap_fields_read); static int _regmap_bulk_read(struct regmap *map, unsigned int reg, - unsigned int *regs, void *val, size_t val_count) + const unsigned int *regs, void *val, size_t val_count) { u32 *u32 = val; u16 *u16 = val; @@ -3210,7 +3209,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read); * A value of zero will be returned on success, a negative errno will * be returned in error cases. */ -int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val, +int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val, size_t val_count) { if (val_count == 0) diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index eb6eb25b343b..deda7f35a059 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, if (prop->is_inline) return -EINVAL; - if (index * sizeof(*ref) >= prop->length) + if ((index + 1) * sizeof(*ref) > prop->length) return -ENOENT; ref_array = prop->pointer; @@ -677,6 +677,7 @@ static const struct fwnode_operations software_node_ops = { .get = software_node_get, .put = software_node_put, .property_present = software_node_property_present, + .property_read_bool = software_node_property_present, .property_read_int_array = software_node_read_int_array, .property_read_string_array = software_node_read_string_array, .get_name = software_node_get_name, @@ -1079,6 +1080,7 @@ void software_node_notify(struct device *dev) if (!swnode) return; + kobject_get(&swnode->kobj); ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node"); if (ret) return; @@ -1088,8 +1090,6 @@ void software_node_notify(struct device *dev) sysfs_remove_link(&dev->kobj, "software_node"); return; } - - kobject_get(&swnode->kobj); } void software_node_notify_remove(struct device *dev) diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig index 5c7fac80611c..2756870615cc 100644 --- a/drivers/base/test/Kconfig +++ b/drivers/base/test/Kconfig @@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE config DM_KUNIT_TEST tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS config DRIVER_PE_KUNIT_TEST tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c index ea05b8785743..6355a2231b74 100644 --- a/drivers/base/test/platform-device-test.c +++ b/drivers/base/test/platform-device-test.c @@ -1,8 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 +#include <kunit/platform_device.h> #include <kunit/resource.h> #include <linux/device.h> +#include <linux/device/bus.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #define DEVICE_NAME "test" @@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = { .test_cases = platform_device_devm_tests, }; -kunit_test_suite(platform_device_devm_test_suite); +static void platform_device_find_by_null_test(struct kunit *test) +{ + struct platform_device *pdev; + int ret; + + pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev); + + ret = kunit_platform_device_add(test, pdev); + KUNIT_ASSERT_EQ(test, ret, 0); + + KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL); + + KUNIT_EXPECT_PTR_EQ(test, 
bus_find_device_by_of_node(&platform_bus_type, NULL), NULL); + KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL); + KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL); + + KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL)); + KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL)); + KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL)); + KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL)); +} + +static struct kunit_case platform_device_match_tests[] = { + KUNIT_CASE(platform_device_find_by_null_test), + {} +}; + +static struct kunit_suite platform_device_match_test_suite = { + .name = "platform-device-match", + .test_cases = platform_device_match_tests, +}; + +kunit_test_suites( + &platform_device_devm_test_suite, + &platform_device_match_test_suite, +); MODULE_DESCRIPTION("Test module for platform devices"); MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
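As a closing usage note: the kunit_platform_device_alloc()/kunit_platform_device_add() helpers used in the new test register the device as a test-managed resource, so no explicit put/unregister is needed. A minimal sketch along the same lines (the test and device names are hypothetical):

#include <kunit/platform_device.h>
#include <kunit/test.h>
#include <linux/platform_device.h>

static void foo_dummy_device_test(struct kunit *test)
{
	struct platform_device *pdev;

	/* Allocation and registration are cleaned up when the test ends. */
	pdev = kunit_platform_device_alloc(test, "foo-test-dev", PLATFORM_DEVID_NONE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
	KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));

	KUNIT_EXPECT_STREQ(test, dev_name(&pdev->dev), "foo-test-dev");
}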