Diffstat (limited to 'drivers/base')
32 files changed, 1050 insertions, 466 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index af0029d30dbe..1037169abb45 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -154,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq, per_cpu(arch_freq_scale, i) = scale; } -DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; -EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale); - -void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) -{ - per_cpu(cpu_scale, cpu) = capacity; -} - DEFINE_PER_CPU(unsigned long, hw_pressure); /** @@ -207,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus, } EXPORT_SYMBOL_GPL(topology_update_hw_pressure); -static ssize_t cpu_capacity_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct cpu *cpu = container_of(dev, struct cpu, dev); - - return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); -} - static void update_topology_flags_workfn(struct work_struct *work); static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn); -static DEVICE_ATTR_RO(cpu_capacity); - -static int cpu_capacity_sysctl_add(unsigned int cpu) -{ - struct device *cpu_dev = get_cpu_device(cpu); - - if (!cpu_dev) - return -ENOENT; - - device_create_file(cpu_dev, &dev_attr_cpu_capacity); - - return 0; -} - -static int cpu_capacity_sysctl_remove(unsigned int cpu) -{ - struct device *cpu_dev = get_cpu_device(cpu); - - if (!cpu_dev) - return -ENOENT; - - device_remove_file(cpu_dev, &dev_attr_cpu_capacity); - - return 0; -} - -static int register_cpu_capacity_sysctl(void) -{ - cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity", - cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove); - - return 0; -} -subsys_initcall(register_cpu_capacity_sysctl); - static int update_topology; int topology_update_cpu_topology(void) diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index 95717d509ca9..12ffdd843756 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -217,7 +217,7 @@ static int auxiliary_bus_probe(struct device *dev) struct auxiliary_device *auxdev = to_auxiliary_dev(dev); int ret; - ret = dev_pm_domain_attach(dev, true); + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (ret) { dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret); return ret; @@ -395,6 +395,116 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) } EXPORT_SYMBOL_GPL(auxiliary_driver_unregister); +static void auxiliary_device_release(struct device *dev) +{ + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + + of_node_put(dev->of_node); + kfree(auxdev); +} + +/** + * auxiliary_device_create - create a device on the auxiliary bus + * @dev: parent device + * @modname: module name used to create the auxiliary driver name. + * @devname: auxiliary bus device name + * @platform_data: auxiliary bus device platform data + * @id: auxiliary bus device id + * + * Helper to create an auxiliary bus device. + * The device created matches driver 'modname.devname' on the auxiliary bus. 
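+ *
+ * A minimal usage sketch (hypothetical caller; the parent device and the
+ * "mymod"/"ctl" names are illustrative, not taken from this patch):
+ *
+ *	struct auxiliary_device *adev;
+ *
+ *	adev = auxiliary_device_create(parent, "mymod", "ctl", NULL, 0);
+ *	if (!adev)
+ *		return -ENODEV;
+ *	...
+ *	auxiliary_device_destroy(adev);
+ *
+ * An auxiliary driver whose id_table contains the name "mymod.ctl" would
+ * then match and probe this device.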
+ */ +struct auxiliary_device *auxiliary_device_create(struct device *dev, + const char *modname, + const char *devname, + void *platform_data, + int id) +{ + struct auxiliary_device *auxdev; + int ret; + + auxdev = kzalloc(sizeof(*auxdev), GFP_KERNEL); + if (!auxdev) + return NULL; + + auxdev->id = id; + auxdev->name = devname; + auxdev->dev.parent = dev; + auxdev->dev.platform_data = platform_data; + auxdev->dev.release = auxiliary_device_release; + device_set_of_node_from_dev(&auxdev->dev, dev); + + ret = auxiliary_device_init(auxdev); + if (ret) { + of_node_put(auxdev->dev.of_node); + kfree(auxdev); + return NULL; + } + + ret = __auxiliary_device_add(auxdev, modname); + if (ret) { + /* + * It may look odd but auxdev should not be freed here. + * auxiliary_device_uninit() calls put_device(), which calls + * the device release function, freeing auxdev. + */ + auxiliary_device_uninit(auxdev); + return NULL; + } + + return auxdev; +} +EXPORT_SYMBOL_GPL(auxiliary_device_create); + +/** + * auxiliary_device_destroy - remove an auxiliary device + * @auxdev: pointer to the auxdev to be removed + * + * Helper to remove an auxiliary device created with + * auxiliary_device_create(). + */ +void auxiliary_device_destroy(void *auxdev) +{ + struct auxiliary_device *_auxdev = auxdev; + + auxiliary_device_delete(_auxdev); + auxiliary_device_uninit(_auxdev); +} +EXPORT_SYMBOL_GPL(auxiliary_device_destroy); + +/** + * __devm_auxiliary_device_create - create a managed device on the auxiliary bus + * @dev: parent device + * @modname: module name used to create the auxiliary driver name. + * @devname: auxiliary bus device name + * @platform_data: auxiliary bus device platform data + * @id: auxiliary bus device id + * + * Device managed helper to create an auxiliary bus device. + * The device created matches driver 'modname.devname' on the auxiliary bus.
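+ *
+ * A minimal usage sketch (hypothetical caller; assumes @dev is a bound
+ * driver's device so the registered devres action runs on unbind):
+ *
+ *	adev = __devm_auxiliary_device_create(dev, "mymod", "ctl", NULL, 0);
+ *	if (!adev)
+ *		return -ENODEV;
+ *
+ * No explicit teardown is needed: auxiliary_device_destroy() is registered
+ * through devm_add_action_or_reset() and runs automatically on unbind.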
+ */ +struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev, + const char *modname, + const char *devname, + void *platform_data, + int id) +{ + struct auxiliary_device *auxdev; + int ret; + + auxdev = auxiliary_device_create(dev, modname, devname, platform_data, id); + if (!auxdev) + return NULL; + + ret = devm_add_action_or_reset(dev, auxiliary_device_destroy, + auxdev); + if (ret) + return NULL; + + return auxdev; +} +EXPORT_SYMBOL_GPL(__devm_auxiliary_device_create); + void __init auxiliary_bus_init(void) { WARN_ON(bus_register(&auxiliary_bus_type)); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf0d455209d7..613410705a47 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/acpi.h> +#include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/cacheinfo.h> #include <linux/compiler.h> @@ -183,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf, return of_property_read_bool(np, "cache-unified"); } +static bool match_cache_node(struct device_node *cpu, + const struct device_node *cache_node) +{ + struct device_node *prev, *cache = of_find_next_cache_node(cpu); + + while (cache) { + if (cache == cache_node) { + of_node_put(cache); + return true; + } + + prev = cache; + cache = of_find_next_cache_node(cache); + of_node_put(prev); + } + + return false; +} + +#ifndef arch_compact_of_hwid +#define arch_compact_of_hwid(_x) (_x) +#endif + +static void cache_of_set_id(struct cacheinfo *this_leaf, + struct device_node *cache_node) +{ + struct device_node *cpu; + u32 min_id = ~0; + + for_each_of_cpu_node(cpu) { + u64 id = of_get_cpu_hwid(cpu, 0); + + id = arch_compact_of_hwid(id); + if (FIELD_GET(GENMASK_ULL(63, 32), id)) { + of_node_put(cpu); + return; + } + + if (match_cache_node(cpu, cache_node)) + min_id = min(min_id, id); + } + + if (min_id != ~0) { + this_leaf->id = min_id; + this_leaf->attributes |= CACHE_ID; + } +} + static void cache_of_set_props(struct cacheinfo *this_leaf, struct device_node *np) { @@ -198,6 +247,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf, cache_get_line_size(this_leaf, np); cache_nr_sets(this_leaf, np); cache_associativity(this_leaf); + cache_of_set_id(this_leaf, np); } static int cache_setup_of_node(unsigned int cpu) diff --git a/drivers/base/component.c b/drivers/base/component.c index abe60eb45c55..024ad9471b8a 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -586,7 +586,8 @@ EXPORT_SYMBOL_GPL(component_master_is_bound); static void component_unbind(struct component *component, struct aggregate_device *adev, void *data) { - WARN_ON(!component->bound); + if (WARN_ON(!component->bound)) + return; dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n", dev_name(component->dev), component, component->ops); diff --git a/drivers/base/core.c b/drivers/base/core.c index cbc0099d8ef2..d22d6b23e758 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -460,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev, struct device_link *link = to_devlink(dev); const char *output; - if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER)) output = "supplier unbind"; - else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) + else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) output = "consumer unbind"; else output = "never"; @@ -476,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev, 
{ struct device_link *link = to_devlink(dev); - return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME)); + return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME)); } static DEVICE_ATTR_RO(runtime_pm); @@ -485,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev, { struct device_link *link = to_devlink(dev); - return sysfs_emit(buf, "%d\n", - !!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)); } static DEVICE_ATTR_RO(sync_state_only); @@ -792,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer, if (link->consumer != consumer) continue; - if (link->flags & DL_FLAG_INFERRED && + if (device_link_test(link, DL_FLAG_INFERRED) && !(flags & DL_FLAG_INFERRED)) link->flags &= ~DL_FLAG_INFERRED; if (flags & DL_FLAG_PM_RUNTIME) { - if (!(link->flags & DL_FLAG_PM_RUNTIME)) { + if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) { pm_runtime_new_link(consumer); link->flags |= DL_FLAG_PM_RUNTIME; } @@ -807,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer, if (flags & DL_FLAG_STATELESS) { kref_get(&link->kref); - if (link->flags & DL_FLAG_SYNC_STATE_ONLY && - !(link->flags & DL_FLAG_STATELESS)) { + if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) && + !device_link_test(link, DL_FLAG_STATELESS)) { link->flags |= DL_FLAG_STATELESS; goto reorder; } else { @@ -823,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer, * update the existing link to stay around longer. */ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { - if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) { link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; } @@ -831,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer, link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER); } - if (!(link->flags & DL_FLAG_MANAGED)) { + if (!device_link_test(link, DL_FLAG_MANAGED)) { kref_get(&link->kref); link->flags |= DL_FLAG_MANAGED; device_link_init_status(link, consumer, supplier); } - if (link->flags & DL_FLAG_SYNC_STATE_ONLY && + if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) && !(flags & DL_FLAG_SYNC_STATE_ONLY)) { link->flags &= ~DL_FLAG_SYNC_STATE_ONLY; goto reorder; @@ -940,7 +939,7 @@ static void __device_link_del(struct kref *kref) static void device_link_put_kref(struct device_link *link) { - if (link->flags & DL_FLAG_STATELESS) + if (device_link_test(link, DL_FLAG_STATELESS)) kref_put(&link->kref, __device_link_del); else if (!device_is_registered(link->consumer)) __device_link_del(&link->kref); @@ -1004,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev) if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } else { - WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)); WRITE_ONCE(link->status, DL_STATE_DORMANT); } } @@ -1072,14 +1071,14 @@ int device_links_check_suppliers(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.suppliers, c_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_AVAILABLE && - !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { + !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) { if (dev_is_best_effort(dev) && - link->flags & DL_FLAG_INFERRED && + 
device_link_test(link, DL_FLAG_INFERRED) && !link->supplier->can_match) { ret = -EAGAIN; continue; @@ -1128,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev, return; list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_ACTIVE) return; @@ -1268,7 +1267,7 @@ void device_links_force_bind(struct device *dev) device_links_write_lock(); list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_AVAILABLE) { @@ -1329,7 +1328,7 @@ void device_links_driver_bound(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; /* @@ -1345,7 +1344,7 @@ void device_links_driver_bound(struct device *dev) WARN_ON(link->status != DL_STATE_DORMANT); WRITE_ONCE(link->status, DL_STATE_AVAILABLE); - if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) + if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER)) driver_deferred_probe_add(link->consumer); } @@ -1357,11 +1356,11 @@ void device_links_driver_bound(struct device *dev) list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { struct device *supplier; - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; supplier = link->supplier; - if (link->flags & DL_FLAG_SYNC_STATE_ONLY) { + if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) { /* * When DL_FLAG_SYNC_STATE_ONLY is set, it means no * other DL_MANAGED_LINK_FLAGS have been set. So, it's @@ -1369,7 +1368,7 @@ void device_links_driver_bound(struct device *dev) */ device_link_drop_managed(link); } else if (dev_is_best_effort(dev) && - link->flags & DL_FLAG_INFERRED && + device_link_test(link, DL_FLAG_INFERRED) && link->status != DL_STATE_CONSUMER_PROBE && !link->supplier->can_match) { /* @@ -1421,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev) struct device_link *link, *ln; list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; - if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) { device_link_drop_managed(link); continue; } @@ -1436,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev) if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } else { - WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)); WRITE_ONCE(link->status, DL_STATE_DORMANT); } } @@ -1461,7 +1460,7 @@ void device_links_no_driver(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; /* @@ -1498,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev) device_links_write_lock(); list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; - WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); + WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)); WARN_ON(link->status != 
DL_STATE_SUPPLIER_UNBIND); /* @@ -1510,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev) * has moved to DL_STATE_SUPPLIER_UNBIND. */ if (link->status == DL_STATE_SUPPLIER_UNBIND && - link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER)) device_link_drop_managed(link); WRITE_ONCE(link->status, DL_STATE_DORMANT); @@ -1544,7 +1543,7 @@ bool device_links_busy(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (!(link->flags & DL_FLAG_MANAGED)) + if (!device_link_test(link, DL_FLAG_MANAGED)) continue; if (link->status == DL_STATE_CONSUMER_PROBE @@ -1586,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev) list_for_each_entry(link, &dev->links.consumers, s_node) { enum device_link_state status; - if (!(link->flags & DL_FLAG_MANAGED) || - link->flags & DL_FLAG_SYNC_STATE_ONLY) + if (!device_link_test(link, DL_FLAG_MANAGED) || + device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) continue; status = link->status; @@ -1743,7 +1742,7 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode) static void fw_devlink_relax_link(struct device_link *link) { - if (!(link->flags & DL_FLAG_INFERRED)) + if (!device_link_test(link, DL_FLAG_INFERRED)) return; if (device_link_flag_is_sync_state_only(link->flags)) @@ -1779,7 +1778,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data) struct device_link *link = to_devlink(dev); struct device *sup = link->supplier; - if (!(link->flags & DL_FLAG_MANAGED) || + if (!device_link_test(link, DL_FLAG_MANAGED) || link->status == DL_STATE_ACTIVE || sup->state_synced || !dev_has_sync_state(sup)) return 0; @@ -1881,8 +1880,6 @@ static void fw_devlink_unblock_consumers(struct device *dev) device_links_write_unlock(); } -#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) - static bool fwnode_init_without_drv(struct fwnode_handle *fwnode) { struct device *dev; @@ -2063,7 +2060,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle, * such due to a cycle. 
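 *
 * For reference: the device_link_test() helper adopted throughout the
 * core.c hunks is defined in <linux/device.h>, outside this diff. Judging
 * by the open-coded expressions it replaces, it is equivalent to:
 *
 *	static inline bool device_link_test(const struct device_link *link,
 *					    u32 flags)
 *	{
 *		return !!(link->flags & flags);
 *	}
 *
 * so each `link->flags & FLAG` test converts one-to-one.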
*/ if (device_link_flag_is_sync_state_only(dev_link->flags) && - !(dev_link->flags & DL_FLAG_CYCLE)) + !device_link_test(dev_link, DL_FLAG_CYCLE)) continue; if (__fw_devlink_relax_cycles(con_handle, @@ -5281,6 +5278,12 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode) } EXPORT_SYMBOL_GPL(device_set_node); +struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode) +{ + return get_device((fwnode)->dev); +} +EXPORT_SYMBOL_GPL(get_dev_from_fwnode); + int device_match_name(struct device *dev, const void *name) { return sysfs_streq(dev_name(dev), name); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 50651435577c..efc575a00edd 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -600,7 +600,9 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); CPU_SHOW_VULN_FALLBACK(gds); CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); CPU_SHOW_VULN_FALLBACK(ghostwrite); +CPU_SHOW_VULN_FALLBACK(old_microcode); CPU_SHOW_VULN_FALLBACK(indirect_target_selection); +CPU_SHOW_VULN_FALLBACK(tsa); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -617,7 +619,9 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL); +static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL); static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); +static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -635,7 +639,9 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_gather_data_sampling.attr, &dev_attr_reg_file_data_sampling.attr, &dev_attr_ghostwrite.attr, + &dev_attr_old_microcode.attr, &dev_attr_indirect_target_selection.attr, + &dev_attr_tsa.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index b526e0e0f52d..13ab98e033ea 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -25,6 +25,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/async.h> +#include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/devinfo.h> #include <linux/slab.h> @@ -552,6 +553,7 @@ static void device_unbind_cleanup(struct device *dev) dev->dma_range_map = NULL; device_set_driver(dev, NULL); dev_set_drvdata(dev, NULL); + dev_pm_domain_detach(dev, dev->power.detach_power_off); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); pm_runtime_reinit(dev); diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index 03a39c417dc4..37faf6156d7c 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -140,7 +140,7 @@ static const struct bin_attribute *const devcd_dev_bin_attrs[] = { }; static const struct attribute_group devcd_dev_group = { - .bin_attrs_new = devcd_dev_bin_attrs, + .bin_attrs = devcd_dev_bin_attrs, }; static const struct attribute_group *devcd_dev_groups[] = { diff --git a/drivers/base/devres.c b/drivers/base/devres.c index d8a733ea5e1a..ff55e1bcfa30 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -759,6 +759,17 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co } EXPORT_SYMBOL_GPL(__devm_add_action); +bool devm_is_action_added(struct device *dev, void 
(*action)(void *), void *data) +{ + struct action_devres devres = { + .data = data, + .action = action, + }; + + return devres_find(dev, devm_action_release, devm_action_match, &devres); +} +EXPORT_SYMBOL_GPL(devm_is_action_added); + /** * devm_remove_action_nowarn() - removes previously added custom action * @dev: Device that owns the action @@ -976,17 +987,10 @@ EXPORT_SYMBOL_GPL(devm_krealloc); */ char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) { - size_t size; - char *buf; - if (!s) return NULL; - size = strlen(s) + 1; - buf = devm_kmalloc(dev, size, gfp); - if (buf) - memcpy(buf, s, size); - return buf; + return devm_kmemdup(dev, s, strlen(s) + 1, gfp); } EXPORT_SYMBOL_GPL(devm_kstrdup); diff --git a/drivers/base/faux.c b/drivers/base/faux.c index 407c1d1aad50..f5fbda0a9a44 100644 --- a/drivers/base/faux.c +++ b/drivers/base/faux.c @@ -25,6 +25,7 @@ struct faux_object { struct faux_device faux_dev; const struct faux_device_ops *faux_ops; + const struct attribute_group **groups; }; #define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev) @@ -43,10 +44,21 @@ static int faux_probe(struct device *dev) struct faux_object *faux_obj = to_faux_object(dev); struct faux_device *faux_dev = &faux_obj->faux_dev; const struct faux_device_ops *faux_ops = faux_obj->faux_ops; - int ret = 0; + int ret; - if (faux_ops && faux_ops->probe) + if (faux_ops && faux_ops->probe) { ret = faux_ops->probe(faux_dev); + if (ret) + return ret; + } + + /* + * Add groups after the probe succeeds to ensure resources are + * initialized correctly + */ + ret = device_add_groups(dev, faux_obj->groups); + if (ret && faux_ops && faux_ops->remove) + faux_ops->remove(faux_dev); return ret; } @@ -57,6 +69,8 @@ static void faux_remove(struct device *dev) struct faux_device *faux_dev = &faux_obj->faux_dev; const struct faux_device_ops *faux_ops = faux_obj->faux_ops; + device_remove_groups(dev, faux_obj->groups); + if (faux_ops && faux_ops->remove) faux_ops->remove(faux_dev); } @@ -72,6 +86,7 @@ static struct device_driver faux_driver = { .name = "faux_driver", .bus = &faux_bus_type, .probe_type = PROBE_FORCE_SYNCHRONOUS, + .suppress_bind_attrs = true, }; static void faux_device_release(struct device *dev) @@ -124,8 +139,9 @@ struct faux_device *faux_device_create_with_groups(const char *name, if (!faux_obj) return NULL; - /* Save off the callbacks so we can use them in the future */ + /* Save off the callbacks and groups so we can use them in the future */ faux_obj->faux_ops = faux_ops; + faux_obj->groups = groups; /* Initialize the device portion and register it with the driver core */ faux_dev = &faux_obj->faux_dev; @@ -138,7 +154,6 @@ struct faux_device *faux_device_create_with_groups(const char *name, else dev->parent = &faux_bus_root; dev->bus = &faux_bus_type; - dev->groups = groups; dev_set_name(dev, "%s", name); ret = device_add(dev); @@ -155,7 +170,7 @@ struct faux_device *faux_device_create_with_groups(const char *name, * successful is almost impossible to determine by the caller. 
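 *
 * The devres.c hunk above adds devm_is_action_added(); a hypothetical
 * caller might use it to avoid registering the same release action twice
 * (my_release and data are illustrative):
 *
 *	static void my_release(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	if (!devm_is_action_added(dev, my_release, data))
 *		ret = devm_add_action_or_reset(dev, my_release, data);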
*/ if (!dev->driver) { - dev_err(dev, "probe did not succeed, tearing down the device\n"); + dev_dbg(dev, "probe did not succeed, tearing down the device\n"); faux_device_destroy(faux_dev); faux_dev = NULL; } diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig index a03701674265..752b9a9bea03 100644 --- a/drivers/base/firmware_loader/Kconfig +++ b/drivers/base/firmware_loader/Kconfig @@ -3,8 +3,7 @@ menu "Firmware loader" config FW_LOADER tristate "Firmware loading facility" if EXPERT - select CRYPTO_HASH if FW_LOADER_DEBUG - select CRYPTO_SHA256 if FW_LOADER_DEBUG + select CRYPTO_LIB_SHA256 if FW_LOADER_DEBUG default y help This enables the firmware loading facility in the kernel. The kernel @@ -28,7 +27,6 @@ config FW_LOADER config FW_LOADER_DEBUG bool "Log filenames and checksums for loaded firmware" - depends on CRYPTO = FW_LOADER || CRYPTO=y depends on DYNAMIC_DEBUG depends on FW_LOADER default FW_LOADER diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index cb0912ea3e62..6942c62fa59d 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -806,41 +806,15 @@ static void fw_abort_batch_reqs(struct firmware *fw) } #if defined(CONFIG_FW_LOADER_DEBUG) -#include <crypto/hash.h> #include <crypto/sha2.h> static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device) { - struct shash_desc *shash; - struct crypto_shash *alg; - u8 *sha256buf; - char *outbuf; + u8 digest[SHA256_DIGEST_SIZE]; - alg = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(alg)) - return; - - sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); - outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL); - shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL); - if (!sha256buf || !outbuf || !shash) - goto out_free; - - shash->tfm = alg; - - if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0) - goto out_free; - - for (int i = 0; i < SHA256_DIGEST_SIZE; i++) - sprintf(&outbuf[i * 2], "%02x", sha256buf[i]); - outbuf[SHA256_BLOCK_SIZE] = 0; - dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf); - -out_free: - kfree(shash); - kfree(outbuf); - kfree(sha256buf); - crypto_free_shash(alg); + sha256(fw->data, fw->size, digest); + dev_dbg(device, "Loaded FW: %s, sha256: %*phN\n", + name, SHA256_DIGEST_SIZE, digest); } #else static void fw_log_firmware_info(const struct firmware *fw, const char *name, @@ -848,26 +822,6 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, {} #endif -/* - * Reject firmware file names with ".." path components. - * There are drivers that construct firmware file names from device-supplied - * strings, and we don't want some device to be able to tell us "I would like to - * be sent my firmware from ../../../etc/shadow, please". - * - * Search for ".." surrounded by either '/' or start/end of string. - * - * This intentionally only looks at the firmware name, not at the firmware base - * directory or at symlink contents. 
- */ -static bool name_contains_dotdot(const char *name) -{ - size_t name_len = strlen(name); - - return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || - strstr(name, "/../") != NULL || - (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); -} - /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -888,6 +842,17 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; } + + /* + * Reject firmware file names with ".." path components. + * There are drivers that construct firmware file names from + * device-supplied strings, and we don't want some device to be + * able to tell us "I would like to be sent my firmware from + * ../../../etc/shadow, please". + * + * This intentionally only looks at the firmware name, not at + * the firmware base directory or at symlink contents. + */ if (name_contains_dotdot(name)) { dev_warn(device, "Firmware load for '%s' refused, path contains '..' component\n", diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c index d254ceb56d84..add0b9b75edd 100644 --- a/drivers/base/firmware_loader/sysfs.c +++ b/drivers/base/firmware_loader/sysfs.c @@ -359,8 +359,8 @@ out: static const struct bin_attribute firmware_attr_data = { .attr = { .name = "data", .mode = 0644 }, .size = 0, - .read_new = firmware_data_read, - .write_new = firmware_data_write, + .read = firmware_data_read, + .write = firmware_data_write, }; static struct attribute *fw_dev_attrs[] = { @@ -381,7 +381,7 @@ static const struct bin_attribute *const fw_dev_bin_attrs[] = { static const struct attribute_group fw_dev_attr_group = { .attrs = fw_dev_attrs, - .bin_attrs_new = fw_dev_bin_attrs, + .bin_attrs = fw_dev_bin_attrs, #ifdef CONFIG_FW_UPLOAD .is_visible = fw_upload_is_visible, #endif diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 19469e7f88c2..5c6c1d6bb59f 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -22,6 +22,7 @@ #include <linux/stat.h> #include <linux/slab.h> #include <linux/xarray.h> +#include <linux/export.h> #include <linux/atomic.h> #include <linux/uaccess.h> @@ -48,22 +49,8 @@ int mhp_online_type_from_str(const char *str) #define to_memory_block(dev) container_of(dev, struct memory_block, dev) -static int sections_per_block; - -static inline unsigned long memory_block_id(unsigned long section_nr) -{ - return section_nr / sections_per_block; -} - -static inline unsigned long pfn_to_block_id(unsigned long pfn) -{ - return memory_block_id(pfn_to_section_nr(pfn)); -} - -static inline unsigned long phys_to_block_id(unsigned long phys) -{ - return pfn_to_block_id(PFN_DOWN(phys)); -} +int sections_per_block; +EXPORT_SYMBOL(sections_per_block); static int memory_subsys_online(struct device *dev); static int memory_subsys_offline(struct device *dev); @@ -110,6 +97,57 @@ static void memory_block_release(struct device *dev) kfree(mem); } + +/* Max block size to be set by memory_block_advise_max_size */ +static unsigned long memory_block_advised_size; +static bool memory_block_advised_size_queried; + +/** + * memory_block_advise_max_size() - advise memory hotplug on the max suggested + * block size, usually for alignment. + * @size: suggestion for maximum block size. must be aligned on power of 2. + * + * Early boot software (pre-allocator init) may advise archs on the max block + * size. 
This value can only decrease after initialization, as the intent is + * to identify the largest supported alignment for all sources. + * + * Use of this value is arch-defined, as is min/max block size. + * + * Return: 0 on success + * -EINVAL if size is 0 or not pow2 aligned + * -EBUSY if value has already been probed + */ +int __init memory_block_advise_max_size(unsigned long size) +{ + if (!size || !is_power_of_2(size)) + return -EINVAL; + + if (memory_block_advised_size_queried) + return -EBUSY; + + if (memory_block_advised_size) + memory_block_advised_size = min(memory_block_advised_size, size); + else + memory_block_advised_size = size; + + return 0; +} + +/** + * memory_block_advised_max_size() - query advised max hotplug block size. + * + * After the first call, the value can never change. Callers looking for the + * actual block size should use memory_block_size_bytes. This interface is + * intended for use by arch-init when initializing the hotplug block size. + * + * Return: advised size in bytes, or 0 if never set. + */ +unsigned long memory_block_advised_max_size(void) +{ + memory_block_advised_size_queried = true; + return memory_block_advised_size; +} + unsigned long __weak memory_block_size_bytes(void) { return MIN_MEMORY_BLOCK_SIZE; @@ -632,7 +670,7 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn) * * Called under device_hotplug_lock. */ -static struct memory_block *find_memory_block_by_id(unsigned long block_id) +struct memory_block *find_memory_block_by_id(unsigned long block_id) { struct memory_block *mem; diff --git a/drivers/base/node.c b/drivers/base/node.c index cd13ef287011..3399594136b2 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/memory.h> +#include <linux/mempolicy.h> #include <linux/vmstat.h> #include <linux/notifier.h> #include <linux/node.h> @@ -20,6 +21,7 @@ #include <linux/pm_runtime.h> #include <linux/swap.h> #include <linux/slab.h> +#include <linux/memblock.h> static const struct bus_type node_subsys = { .name = "node", @@ -110,6 +112,27 @@ static const struct attribute_group *node_access_node_groups[] = { NULL, }; +#ifdef CONFIG_MEMORY_HOTPLUG +static BLOCKING_NOTIFIER_HEAD(node_chain); + +int register_node_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&node_chain, nb); +} +EXPORT_SYMBOL(register_node_notifier); + +void unregister_node_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&node_chain, nb); +} +EXPORT_SYMBOL(unregister_node_notifier); + +int node_notify(unsigned long val, void *v) +{ + return blocking_notifier_call_chain(&node_chain, val, v); +} +#endif + static void node_remove_accesses(struct node *node) { struct node_access_nodes *c, *cnext; @@ -214,6 +237,14 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord, break; } } + + /* When setting CPU access coordinates, update mempolicy */ + if (access == ACCESS_COORDINATE_CPU) { + if (mempolicy_set_node_perf(nid, coord)) { + pr_info("failed to set mempolicy attrs for node %d\n", + nid); + } + } } EXPORT_SYMBOL_GPL(node_set_perf_attrs); @@ -468,8 +499,8 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(pgdat, NR_PAGETABLE)), nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), nid, 0UL, - nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), - nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), + nid, 0UL, + nid, 0UL, nid, K(sreclaimable + node_page_state(pgdat, 
NR_KERNEL_MISC_RECLAIMABLE)), nid, K(sreclaimable + sunreclaimable), @@ -588,7 +619,7 @@ static const struct bin_attribute *node_dev_bin_attrs[] = { static const struct attribute_group node_dev_group = { .attrs = node_dev_attrs, - .bin_attrs_new = node_dev_bin_attrs, + .bin_attrs = node_dev_bin_attrs, }; static const struct attribute_group *node_dev_groups[] = { @@ -628,6 +659,7 @@ static int register_node(struct node *node, int num) } else { hugetlb_register_node(node); compaction_register_node(node); + reclaim_register_node(node); } return error; @@ -644,6 +676,7 @@ void unregister_node(struct node *node) { hugetlb_unregister_node(node); compaction_unregister_node(node); + reclaim_unregister_node(node); node_remove_accesses(node); node_remove_caches(node); device_unregister(&node->dev); @@ -747,15 +780,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) } #ifdef CONFIG_MEMORY_HOTPLUG -static int __ref get_nid_for_pfn(unsigned long pfn) -{ -#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT - if (system_state < SYSTEM_RUNNING) - return early_pfn_to_nid(pfn); -#endif - return pfn_to_nid(pfn); -} - static void do_register_memory_block_under_node(int nid, struct memory_block *mem_blk, enum meminit_context context) @@ -782,46 +806,6 @@ static void do_register_memory_block_under_node(int nid, ret); } -/* register memory section under specified node if it spans that node */ -static int register_mem_block_under_node_early(struct memory_block *mem_blk, - void *arg) -{ - unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE; - unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); - unsigned long end_pfn = start_pfn + memory_block_pfns - 1; - int nid = *(int *)arg; - unsigned long pfn; - - for (pfn = start_pfn; pfn <= end_pfn; pfn++) { - int page_nid; - - /* - * memory block could have several absent sections from start. - * skip pfn range from absent section - */ - if (!pfn_in_present_section(pfn)) { - pfn = round_down(pfn + PAGES_PER_SECTION, - PAGES_PER_SECTION) - 1; - continue; - } - - /* - * We need to check if page belongs to nid only at the boot - * case because node's ranges can be interleaved. - */ - page_nid = get_nid_for_pfn(pfn); - if (page_nid < 0) - continue; - if (page_nid != nid) - continue; - - do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY); - return 0; - } - /* mem section does not span the specified node */ - return 0; -} - /* * During hotplug we know that all pages in the memory block belong to the same * node. 
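The register_memory_blocks_under_nodes() loop added below maps memblock region bounds to memory block ids with phys_to_block_id(). With sections_per_block now exported from memory.c, the presumed inline equivalents of the helpers removed there are:

	/* sketch of the block-id helpers, following the definitions removed from memory.c */
	static inline unsigned long memory_block_id(unsigned long section_nr)
	{
		return section_nr / sections_per_block;
	}

	static inline unsigned long phys_to_block_id(unsigned long phys)
	{
		return memory_block_id(pfn_to_section_nr(PFN_DOWN(phys)));
	}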
@@ -850,24 +834,44 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk) kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); } -void register_memory_blocks_under_node(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context) +/* register all memory blocks under the corresponding nodes */ +static void register_memory_blocks_under_nodes(void) { - walk_memory_blocks_func_t func; + struct memblock_region *r; - if (context == MEMINIT_HOTPLUG) - func = register_mem_block_under_node_hotplug; - else - func = register_mem_block_under_node_early; + for_each_mem_region(r) { + const unsigned long start_block_id = phys_to_block_id(r->base); + const unsigned long end_block_id = phys_to_block_id(r->base + r->size - 1); + const int nid = memblock_get_region_node(r); + unsigned long block_id; + if (!node_online(nid)) + continue; + + for (block_id = start_block_id; block_id <= end_block_id; block_id++) { + struct memory_block *mem; + + mem = find_memory_block_by_id(block_id); + if (!mem) + continue; + + do_register_memory_block_under_node(nid, mem, MEMINIT_EARLY); + put_device(&mem->dev); + } + + } +} + +void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn, + unsigned long end_pfn) +{ walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), - (void *)&nid, func); + (void *)&nid, register_mem_block_under_node_hotplug); return; } #endif /* CONFIG_MEMORY_HOTPLUG */ -int __register_one_node(int nid) +int register_one_node(int nid) { int error; int cpu; @@ -971,11 +975,13 @@ void __init node_dev_init(void) /* * Create all node devices, which will properly link the node - * to applicable memory block devices and already created cpu devices. + * to already created cpu devices. */ for_each_online_node(i) { - ret = register_one_node(i); + ret = register_one_node(i); if (ret) panic("%s() failed to add node: %d\n", __func__, ret); } + + register_memory_blocks_under_nodes(); } diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 0e60dd650b5e..70db08f3ac6f 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs); void platform_device_msi_free_irqs_all(struct device *dev) { msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); + msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN); } EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index cfccf3ff36e7..09450349cf32 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -982,7 +982,7 @@ struct platform_device * __init_or_module __platform_create_bundle( struct platform_device *pdev; int error; - pdev = platform_device_alloc(driver->driver.name, -1); + pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE); if (!pdev) { error = -ENOMEM; goto err_out; @@ -1396,15 +1396,13 @@ static int platform_probe(struct device *_dev) if (ret < 0) return ret; - ret = dev_pm_domain_attach(_dev, true); + ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON | + PD_FLAG_DETACH_POWER_OFF); if (ret) goto out; - if (drv->probe) { + if (drv->probe) ret = drv->probe(dev); - if (ret) - dev_pm_domain_detach(_dev, true); - } out: if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { @@ -1422,7 +1420,6 @@ static void platform_remove(struct device *_dev) if (drv->remove) drv->remove(dev); - dev_pm_domain_detach(_dev, true); } static void platform_shutdown(struct device *_dev) diff --git 
a/drivers/base/power/common.c b/drivers/base/power/common.c index 781968a128ff..6ecf9ce4a4e6 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); /** * dev_pm_domain_attach - Attach a device to its PM domain. * @dev: Device to attach. - * @power_on: Used to indicate whether we should power on the device. + * @flags: Indicates whether to power on the device on attach and power it off on detach. * * The @dev may only be attached to a single PM domain. By iterating through * the available alternatives we try to find a valid PM domain for the device. @@ -100,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); * Returns 0 on successfully attached PM domain, or when it is found that the * device doesn't need a PM domain, else a negative error code. */ -int dev_pm_domain_attach(struct device *dev, bool power_on) +int dev_pm_domain_attach(struct device *dev, u32 flags) { int ret; if (dev->pm_domain) return 0; - ret = acpi_dev_pm_attach(dev, power_on); + ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON)); if (!ret) ret = genpd_dev_pm_attach(dev); + if (dev->pm_domain) + dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF); + return ret < 0 ? ret : 0; } EXPORT_SYMBOL_GPL(dev_pm_domain_attach); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c8b0a9e29ed8..dbf5456cd891 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -63,8 +63,23 @@ static LIST_HEAD(dpm_noirq_list); static DEFINE_MUTEX(dpm_list_mtx); static pm_message_t pm_transition; +static DEFINE_MUTEX(async_wip_mtx); static int async_error; +/** + * pm_hibernate_is_recovering - check whether we are recovering from a hibernation error. + * + * Used to query whether dev_pm_ops.thaw() is being called for the normal + * hibernation case or while recovering from an error. + * + * Return: true for the error case, false for the normal case.
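+ *
+ * A hypothetical thaw() callback could use this to choose between a light
+ * and a full re-initialization (sketch; my_full_reinit() is illustrative):
+ *
+ *	static int my_thaw(struct device *dev)
+ *	{
+ *		if (pm_hibernate_is_recovering())
+ *			return my_full_reinit(dev);
+ *		return 0;
+ *	}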
+ */ +bool pm_hibernate_is_recovering(void) +{ + return pm_transition.event == PM_EVENT_RECOVER; +} +EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering); + static const char *pm_verb(int event) { switch (event) { @@ -512,7 +527,7 @@ struct dpm_watchdog { */ static void dpm_watchdog_handler(struct timer_list *t) { - struct dpm_watchdog *wd = from_timer(wd, t, timer); + struct dpm_watchdog *wd = timer_container_of(wd, t, timer); struct timer_list *timer = &wd->timer; unsigned int time_left; @@ -560,7 +575,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd) struct timer_list *timer = &wd->timer; timer_delete_sync(timer); - destroy_timer_on_stack(timer); + timer_destroy_on_stack(timer); } #else #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) @@ -597,8 +612,11 @@ static bool is_async(struct device *dev) && !pm_trace_is_enabled(); } -static bool dpm_async_fn(struct device *dev, async_func_t func) +static bool __dpm_async(struct device *dev, async_func_t func) { + if (dev->power.work_in_progress) + return true; + if (!is_async(dev)) return false; @@ -611,14 +629,57 @@ static bool dpm_async_fn(struct device *dev, async_func_t func) put_device(dev); + return false; +} + +static bool dpm_async_fn(struct device *dev, async_func_t func) +{ + guard(mutex)(&async_wip_mtx); + + return __dpm_async(dev, func); +} + +static int dpm_async_with_cleanup(struct device *dev, void *fn) +{ + guard(mutex)(&async_wip_mtx); + + if (!__dpm_async(dev, fn)) + dev->power.work_in_progress = false; + + return 0; +} + +static void dpm_async_resume_children(struct device *dev, async_func_t func) +{ /* - * async_schedule_dev_nocall() above has returned false, so func() is - * not running and it is safe to update power.work_in_progress without - * extra synchronization. + * Prevent racing with dpm_clear_async_state() during initial list + * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and + * dpm_resume(). */ - dev->power.work_in_progress = false; + guard(mutex)(&dpm_list_mtx); - return false; + /* + * Start processing "async" children of the device unless it's been + * started already for them. + */ + device_for_each_child(dev, func, dpm_async_with_cleanup); +} + +static void dpm_async_resume_subordinate(struct device *dev, async_func_t func) +{ + struct device_link *link; + int idx; + + dpm_async_resume_children(dev, func); + + idx = device_links_read_lock(); + + /* Start processing the device's "async" consumers. */ + list_for_each_entry_rcu(link, &dev->links.consumers, s_node) + if (READ_ONCE(link->status) != DL_STATE_DORMANT) + dpm_async_with_cleanup(link->consumer, func); + + device_links_read_unlock(idx); } static void dpm_clear_async_state(struct device *dev) @@ -627,6 +688,20 @@ static void dpm_clear_async_state(struct device *dev) dev->power.work_in_progress = false; } +static bool dpm_root_device(struct device *dev) +{ + lockdep_assert_held(&dpm_list_mtx); + + /* + * Since this function is required to run under dpm_list_mtx, the + * list_empty() below will only return true if the device's list of + * consumers is actually empty before calling it. + */ + return !dev->parent && list_empty(&dev->links.suppliers); +} + +static void async_resume_noirq(void *data, async_cookie_t cookie); + /** * device_resume_noirq - Execute a "noirq resume" callback for given device. * @dev: Device to handle. @@ -706,10 +781,12 @@ Out: TRACE_RESUME(error); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async noirq" : " noirq", error); } + + dpm_async_resume_subordinate(dev, async_resume_noirq); } static void async_resume_noirq(void *data, async_cookie_t cookie) @@ -733,19 +810,20 @@ static void dpm_noirq_resume_devices(pm_message_t state) mutex_lock(&dpm_list_mtx); /* - * Trigger the resume of "async" devices upfront so they don't have to - * wait for the "non-async" ones they don't depend on. + * Start processing "async" root devices upfront so they don't wait for + * the "sync" devices they don't depend on. */ list_for_each_entry(dev, &dpm_noirq_list, power.entry) { dpm_clear_async_state(dev); - dpm_async_fn(dev, async_resume_noirq); + if (dpm_root_device(dev)) + dpm_async_with_cleanup(dev, async_resume_noirq); } while (!list_empty(&dpm_noirq_list)) { dev = to_device(dpm_noirq_list.next); list_move_tail(&dev->power.entry, &dpm_late_early_list); - if (!dev->power.work_in_progress) { + if (!dpm_async_fn(dev, async_resume_noirq)) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -760,7 +838,7 @@ static void dpm_noirq_resume_devices(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, "noirq"); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); @@ -781,6 +859,8 @@ void dpm_resume_noirq(pm_message_t state) device_wakeup_disarm_wake_irqs(); } +static void async_resume_early(void *data, async_cookie_t cookie); + /** * device_resume_early - Execute an "early resume" callback for given device. * @dev: Device to handle. @@ -844,10 +924,12 @@ Out: complete_all(&dev->power.completion); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? " async early" : " early", error); } + + dpm_async_resume_subordinate(dev, async_resume_early); } static void async_resume_early(void *data, async_cookie_t cookie) @@ -875,19 +957,20 @@ void dpm_resume_early(pm_message_t state) mutex_lock(&dpm_list_mtx); /* - * Trigger the resume of "async" devices upfront so they don't have to - * wait for the "non-async" ones they don't depend on. + * Start processing "async" root devices upfront so they don't wait for + * the "sync" devices they don't depend on. */ list_for_each_entry(dev, &dpm_late_early_list, power.entry) { dpm_clear_async_state(dev); - dpm_async_fn(dev, async_resume_early); + if (dpm_root_device(dev)) + dpm_async_with_cleanup(dev, async_resume_early); } while (!list_empty(&dpm_late_early_list)) { dev = to_device(dpm_late_early_list.next); list_move_tail(&dev->power.entry, &dpm_suspended_list); - if (!dev->power.work_in_progress) { + if (!dpm_async_fn(dev, async_resume_early)) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -902,7 +985,7 @@ void dpm_resume_early(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, "early"); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME_EARLY); trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); @@ -919,6 +1002,8 @@ void dpm_resume_start(pm_message_t state) } EXPORT_SYMBOL_GPL(dpm_resume_start); +static void async_resume(void *data, async_cookie_t cookie); + /** * device_resume - Execute "resume" callbacks for given device. * @dev: Device to handle. 
@@ -941,6 +1026,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) if (!dev->power.is_suspended) goto Complete; + dev->power.is_suspended = false; + if (dev->power.direct_complete) { /* * Allow new children to be added under the device after this @@ -1003,7 +1090,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) End: error = dpm_run_callback(callback, dev, state, info); - dev->power.is_suspended = false; device_unlock(dev); dpm_watchdog_clear(&wd); @@ -1014,10 +1100,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) TRACE_RESUME(error); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? " async" : "", error); } + + dpm_async_resume_subordinate(dev, async_resume); } static void async_resume(void *data, async_cookie_t cookie) @@ -1041,7 +1129,6 @@ void dpm_resume(pm_message_t state) ktime_t starttime = ktime_get(); trace_suspend_resume(TPS("dpm_resume"), state.event, true); - might_sleep(); pm_transition = state; async_error = 0; @@ -1049,19 +1136,20 @@ void dpm_resume(pm_message_t state) mutex_lock(&dpm_list_mtx); /* - * Trigger the resume of "async" devices upfront so they don't have to - * wait for the "non-async" ones they don't depend on. + * Start processing "async" root devices upfront so they don't wait for + * the "sync" devices they don't depend on. */ list_for_each_entry(dev, &dpm_suspended_list, power.entry) { dpm_clear_async_state(dev); - dpm_async_fn(dev, async_resume); + if (dpm_root_device(dev)) + dpm_async_with_cleanup(dev, async_resume); } while (!list_empty(&dpm_suspended_list)) { dev = to_device(dpm_suspended_list.next); list_move_tail(&dev->power.entry, &dpm_prepared_list); - if (!dev->power.work_in_progress) { + if (!dpm_async_fn(dev, async_resume)) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -1076,7 +1164,7 @@ void dpm_resume(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, NULL); - if (async_error) + if (READ_ONCE(async_error)) dpm_save_failed_step(SUSPEND_RESUME); cpufreq_resume(); @@ -1143,7 +1231,6 @@ void dpm_complete(pm_message_t state) struct list_head list; trace_suspend_resume(TPS("dpm_complete"), state.event, true); - might_sleep(); INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); @@ -1182,6 +1269,7 @@ void dpm_complete(pm_message_t state) void dpm_resume_end(pm_message_t state) { dpm_resume(state); + pm_restore_gfp_mask(); dpm_complete(state); } EXPORT_SYMBOL_GPL(dpm_resume_end); @@ -1189,6 +1277,82 @@ EXPORT_SYMBOL_GPL(dpm_resume_end); /*------------------------- Suspend routines -------------------------*/ +static bool dpm_leaf_device(struct device *dev) +{ + struct device *child; + + lockdep_assert_held(&dpm_list_mtx); + + child = device_find_any_child(dev); + if (child) { + put_device(child); + + return false; + } + + /* + * Since this function is required to run under dpm_list_mtx, the + * list_empty() below will only return true if the device's list of + * consumers is actually empty before calling it. 
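+ *
+ * dpm_leaf_device() is the suspend-side counterpart of dpm_root_device():
+ * suspend fans out from leaf devices (no children, no consumers) toward
+ * parents and suppliers, while resume fans out from root devices toward
+ * children and consumers.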
+ */ + return list_empty(&dev->links.consumers); +} + +static bool dpm_async_suspend_parent(struct device *dev, async_func_t func) +{ + guard(mutex)(&dpm_list_mtx); + + /* + * If the device is suspended asynchronously and the parent's callback + * deletes both the device and the parent itself, the parent object may + * be freed while this function is running, so avoid that by checking + * if the device has been deleted already as the parent cannot be + * deleted before it. + */ + if (!device_pm_initialized(dev)) + return false; + + /* Start processing the device's parent if it is "async". */ + if (dev->parent) + dpm_async_with_cleanup(dev->parent, func); + + return true; +} + +static void dpm_async_suspend_superior(struct device *dev, async_func_t func) +{ + struct device_link *link; + int idx; + + if (!dpm_async_suspend_parent(dev, func)) + return; + + idx = device_links_read_lock(); + + /* Start processing the device's "async" suppliers. */ + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + if (READ_ONCE(link->status) != DL_STATE_DORMANT) + dpm_async_with_cleanup(link->supplier, func); + + device_links_read_unlock(idx); +} + +static void dpm_async_suspend_complete_all(struct list_head *device_list) +{ + struct device *dev; + + guard(mutex)(&async_wip_mtx); + + list_for_each_entry_reverse(dev, device_list, power.entry) { + /* + * In case the device is being waited for and async processing + * has not started for it yet, let the waiters make progress. + */ + if (!dev->power.work_in_progress) + complete_all(&dev->power.completion); + } +} + /** * resume_event - Return a "resume" message for given "suspend" sleep state. * @sleep_state: PM message representing a sleep state. @@ -1226,6 +1390,8 @@ static void dpm_superior_set_must_resume(struct device *dev) device_links_read_unlock(idx); } +static void async_suspend_noirq(void *data, async_cookie_t cookie); + /** * device_suspend_noirq - Execute a "noirq suspend" callback for given device. * @dev: Device to handle. @@ -1235,7 +1401,7 @@ static void dpm_superior_set_must_resume(struct device *dev) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async) +static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1246,7 +1412,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy dpm_wait_for_subordinate(dev, async); - if (async_error) + if (READ_ONCE(async_error)) goto Complete; if (dev->power.syscore || dev->power.direct_complete) @@ -1279,7 +1445,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy Run: error = dpm_run_callback(callback, dev, state, info); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async noirq" : " noirq", error); goto Complete; @@ -1304,7 +1470,11 @@ Skip: Complete: complete_all(&dev->power.completion); TRACE_SUSPEND(error); - return error; + + if (error || READ_ONCE(async_error)) + return; + + dpm_async_suspend_superior(dev, async_suspend_noirq); } static void async_suspend_noirq(void *data, async_cookie_t cookie) @@ -1318,7 +1488,8 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie) static int dpm_noirq_suspend_devices(pm_message_t state) { ktime_t starttime = ktime_get(); - int error = 0; + struct device *dev; + int error; trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); @@ -1327,12 +1498,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state) mutex_lock(&dpm_list_mtx); + /* + * Start processing "async" leaf devices upfront so they don't need to + * wait for the "sync" devices they don't depend on. + */ + list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) { + dpm_clear_async_state(dev); + if (dpm_leaf_device(dev)) + dpm_async_with_cleanup(dev, async_suspend_noirq); + } + while (!list_empty(&dpm_late_early_list)) { - struct device *dev = to_device(dpm_late_early_list.prev); + dev = to_device(dpm_late_early_list.prev); list_move(&dev->power.entry, &dpm_noirq_list); - dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend_noirq)) continue; @@ -1340,22 +1520,28 @@ static int dpm_noirq_suspend_devices(pm_message_t state) mutex_unlock(&dpm_list_mtx); - error = device_suspend_noirq(dev, state, false); + device_suspend_noirq(dev, state, false); put_device(dev); mutex_lock(&dpm_list_mtx); - if (error || async_error) + if (READ_ONCE(async_error)) { + dpm_async_suspend_complete_all(&dpm_late_early_list); + /* + * Move all devices to the target list to resume them + * properly. + */ + list_splice_init(&dpm_late_early_list, &dpm_noirq_list); break; + } } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - if (!error) - error = async_error; + error = READ_ONCE(async_error); if (error) dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); @@ -1400,6 +1586,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev) spin_unlock_irq(&parent->power.lock); } +static void async_suspend_late(void *data, async_cookie_t cookie); + /** * device_suspend_late - Execute a "late suspend" callback for given device. * @dev: Device to handle. @@ -1408,7 +1596,7 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_suspend_late(struct device *dev, pm_message_t state, bool async) +static void device_suspend_late(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1425,11 +1613,11 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn dpm_wait_for_subordinate(dev, async); - if (async_error) + if (READ_ONCE(async_error)) goto Complete; if (pm_wakeup_pending()) { - async_error = -EBUSY; + WRITE_ONCE(async_error, -EBUSY); goto Complete; } @@ -1463,7 +1651,7 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn Run: error = dpm_run_callback(callback, dev, state, info); if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async late" : " late", error); goto Complete; @@ -1476,7 +1664,11 @@ Skip: Complete: TRACE_SUSPEND(error); complete_all(&dev->power.completion); - return error; + + if (error || READ_ONCE(async_error)) + return; + + dpm_async_suspend_superior(dev, async_suspend_late); } static void async_suspend_late(void *data, async_cookie_t cookie) @@ -1494,7 +1686,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie) int dpm_suspend_late(pm_message_t state) { ktime_t starttime = ktime_get(); - int error = 0; + struct device *dev; + int error; trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); @@ -1505,12 +1698,21 @@ int dpm_suspend_late(pm_message_t state) mutex_lock(&dpm_list_mtx); + /* + * Start processing "async" leaf devices upfront so they don't need to + * wait for the "sync" devices they don't depend on. + */ + list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) { + dpm_clear_async_state(dev); + if (dpm_leaf_device(dev)) + dpm_async_with_cleanup(dev, async_suspend_late); + } + while (!list_empty(&dpm_suspended_list)) { - struct device *dev = to_device(dpm_suspended_list.prev); + dev = to_device(dpm_suspended_list.prev); list_move(&dev->power.entry, &dpm_late_early_list); - dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend_late)) continue; @@ -1518,22 +1720,28 @@ int dpm_suspend_late(pm_message_t state) mutex_unlock(&dpm_list_mtx); - error = device_suspend_late(dev, state, false); + device_suspend_late(dev, state, false); put_device(dev); mutex_lock(&dpm_list_mtx); - if (error || async_error) + if (READ_ONCE(async_error)) { + dpm_async_suspend_complete_all(&dpm_suspended_list); + /* + * Move all devices to the target list to resume them + * properly. + */ + list_splice_init(&dpm_suspended_list, &dpm_late_early_list); break; + } } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - if (!error) - error = async_error; + error = READ_ONCE(async_error); if (error) { dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); @@ -1614,13 +1822,15 @@ static void dpm_clear_superiors_direct_complete(struct device *dev) device_links_read_unlock(idx); } +static void async_suspend(void *data, async_cookie_t cookie); + /** * device_suspend - Execute "suspend" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being suspended asynchronously. */ -static int device_suspend(struct device *dev, pm_message_t state, bool async) +static void device_suspend(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1632,7 +1842,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) dpm_wait_for_subordinate(dev, async); - if (async_error) { + if (READ_ONCE(async_error)) { dev->power.direct_complete = false; goto Complete; } @@ -1652,7 +1862,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) if (pm_wakeup_pending()) { dev->power.direct_complete = false; - async_error = -EBUSY; + WRITE_ONCE(async_error, -EBUSY); goto Complete; } @@ -1736,14 +1946,18 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) Complete: if (error) { - async_error = error; + WRITE_ONCE(async_error, error); dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async" : "", error); } complete_all(&dev->power.completion); TRACE_SUSPEND(error); - return error; + + if (error || READ_ONCE(async_error)) + return; + + dpm_async_suspend_superior(dev, async_suspend); } static void async_suspend(void *data, async_cookie_t cookie) @@ -1761,7 +1975,8 @@ static void async_suspend(void *data, async_cookie_t cookie) int dpm_suspend(pm_message_t state) { ktime_t starttime = ktime_get(); - int error = 0; + struct device *dev; + int error; trace_suspend_resume(TPS("dpm_suspend"), state.event, true); might_sleep(); @@ -1774,12 +1989,21 @@ int dpm_suspend(pm_message_t state) mutex_lock(&dpm_list_mtx); + /* + * Start processing "async" leaf devices upfront so they don't need to + * wait for the "sync" devices they don't depend on. + */ + list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) { + dpm_clear_async_state(dev); + if (dpm_leaf_device(dev)) + dpm_async_with_cleanup(dev, async_suspend); + } + while (!list_empty(&dpm_prepared_list)) { - struct device *dev = to_device(dpm_prepared_list.prev); + dev = to_device(dpm_prepared_list.prev); list_move(&dev->power.entry, &dpm_suspended_list); - dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend)) continue; @@ -1787,22 +2011,28 @@ int dpm_suspend(pm_message_t state) mutex_unlock(&dpm_list_mtx); - error = device_suspend(dev, state, false); + device_suspend(dev, state, false); put_device(dev); mutex_lock(&dpm_list_mtx); - if (error || async_error) + if (READ_ONCE(async_error)) { + dpm_async_suspend_complete_all(&dpm_prepared_list); + /* + * Move all devices to the target list to resume them + * properly. + */ + list_splice_init(&dpm_prepared_list, &dpm_suspended_list); break; + } } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - if (!error) - error = async_error; + error = READ_ONCE(async_error); if (error) dpm_save_failed_step(SUSPEND_SUSPEND); @@ -1836,7 +2066,7 @@ static bool device_prepare_smart_suspend(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { - if (!(link->flags & DL_FLAG_PM_RUNTIME)) + if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) continue; if (!dev_pm_smart_suspend(link->supplier) && @@ -1947,7 +2177,6 @@ int dpm_prepare(pm_message_t state) int error = 0; trace_suspend_resume(TPS("dpm_prepare"), state.event, true); - might_sleep(); /* * Give a chance for the known devices to complete their probes, before @@ -2014,8 +2243,10 @@ int dpm_suspend_start(pm_message_t state) error = dpm_prepare(state); if (error) dpm_save_failed_step(SUSPEND_PREPARE); - else + else { + pm_restrict_gfp_mask(); error = dpm_suspend(state); + } dpm_show_time(starttime, state, error, "start"); return error; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 0e127b0329c0..3e84dc4122de 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -19,10 +19,24 @@ typedef int (*pm_callback_t)(struct device *); +static inline pm_callback_t get_callback_ptr(const void *start, size_t offset) +{ + return *(pm_callback_t *)(start + offset); +} + +static pm_callback_t __rpm_get_driver_callback(struct device *dev, + size_t cb_offset) +{ + if (dev->driver && dev->driver->pm) + return get_callback_ptr(dev->driver->pm, cb_offset); + + return NULL; +} + static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) { - pm_callback_t cb; const struct dev_pm_ops *ops; + pm_callback_t cb = NULL; if (dev->pm_domain) ops = &dev->pm_domain->ops; @@ -36,12 +50,10 @@ static 
pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) ops = NULL; if (ops) - cb = *(pm_callback_t *)((void *)ops + cb_offset); - else - cb = NULL; + cb = get_callback_ptr(ops, cb_offset); - if (!cb && dev->driver && dev->driver->pm) - cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); + if (!cb) + cb = __rpm_get_driver_callback(dev, cb_offset); return cb; } @@ -290,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev) device_links_read_lock_held()) { int retval; - if (!(link->flags & DL_FLAG_PM_RUNTIME)) + if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) continue; retval = pm_runtime_get_sync(link->supplier); @@ -1011,7 +1023,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) * If 'expires' is after the current time, we've been called * too early. */ - if (expires > 0 && expires < ktime_get_mono_fast_ns()) { + if (expires > 0 && expires <= ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); @@ -1191,10 +1203,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume); * * Return -EINVAL if runtime PM is disabled for @dev. * - * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either - * @ign_usage_count is %true or the runtime PM usage counter of @dev is not - * zero, increment the usage counter of @dev and return 1. Otherwise, return 0 - * without changing the usage counter. + * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count + * is set, or (2) @dev is not ignoring children and its active child count is + * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment + * the usage counter of @dev and return 1. + * + * Otherwise, return 0 without changing the usage counter. * * If @ign_usage_count is %true, this function can be used to prevent suspending * the device when its runtime PM status is %RPM_ACTIVE. @@ -1216,7 +1230,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count) retval = -EINVAL; } else if (dev->power.runtime_status != RPM_ACTIVE) { retval = 0; - } else if (ign_usage_count) { + } else if (ign_usage_count || (!dev->power.ignore_children && + atomic_read(&dev->power.child_count) > 0)) { retval = 1; atomic_inc(&dev->power.usage_count); } else { @@ -1249,10 +1264,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); * @dev: Target device. * * Increment the runtime PM usage counter of @dev if its runtime PM status is - * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case - * it returns 1. If the device is in a different state or its usage_count is 0, - * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device, - * in which case also the usage_count will remain unmodified. + * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not + * ignoring children and its active child count is nonzero. 1 is returned in + * this case. + + * + * If @dev is in a different state or it is not in use (that is, its usage + * counter is 0, or it is ignoring children, or its active child count is 0), + * 0 is returned. + * + * -EINVAL is returned if runtime PM is disabled for the device, in which case + * also the usage counter of @dev is not updated. 
*/ int pm_runtime_get_if_in_use(struct device *dev) { @@ -1568,6 +1589,32 @@ out: } EXPORT_SYMBOL_GPL(pm_runtime_enable); +static void pm_runtime_set_suspended_action(void *data) +{ + pm_runtime_set_suspended(data); +} + +/** + * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable. + * + * @dev: Device to handle. + */ +int devm_pm_runtime_set_active_enabled(struct device *dev) +{ + int err; + + err = pm_runtime_set_active(dev); + if (err) + return err; + + err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev); + if (err) + return err; + + return devm_pm_runtime_enable(dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled); + static void pm_runtime_disable_action(void *data) { pm_runtime_dont_use_autosuspend(data); @@ -1590,6 +1637,24 @@ int devm_pm_runtime_enable(struct device *dev) } EXPORT_SYMBOL_GPL(devm_pm_runtime_enable); +static void pm_runtime_put_noidle_action(void *data) +{ + pm_runtime_put_noidle(data); +} + +/** + * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume. + * + * @dev: Device to handle. + */ +int devm_pm_runtime_get_noresume(struct device *dev) +{ + pm_runtime_get_noresume(dev); + + return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume); + /** * pm_runtime_forbid - Block runtime PM of a device. * @dev: Device to handle. @@ -1783,7 +1848,7 @@ void pm_runtime_init(struct device *dev) dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; - dev->power.needs_force_resume = 0; + dev->power.needs_force_resume = false; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; @@ -1810,6 +1875,11 @@ void pm_runtime_reinit(struct device *dev) pm_runtime_put(dev->parent); } } + /* + * Clear power.needs_force_resume in case it has been set by + * pm_runtime_force_suspend() invoked from a driver remove callback. + */ + dev->power.needs_force_resume = false; } /** @@ -1835,7 +1905,7 @@ void pm_runtime_get_suppliers(struct device *dev) list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) - if (link->flags & DL_FLAG_PM_RUNTIME) { + if (device_link_test(link, DL_FLAG_PM_RUNTIME)) { link->supplier_preactivated = true; pm_runtime_get_sync(link->supplier); } @@ -1889,7 +1959,7 @@ static void pm_runtime_drop_link_count(struct device *dev) */ void pm_runtime_drop_link(struct device_link *link) { - if (!(link->flags & DL_FLAG_PM_RUNTIME)) + if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) return; pm_runtime_drop_link_count(link->consumer); @@ -1897,13 +1967,23 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); } -bool pm_runtime_need_not_resume(struct device *dev) +static pm_callback_t get_callback(struct device *dev, size_t cb_offset) { - return atomic_read(&dev->power.usage_count) <= 1 && - (atomic_read(&dev->power.child_count) == 0 || - dev->power.ignore_children); + /* + * Setting power.strict_midlayer means that the middle layer + * code does not want its runtime PM callbacks to be invoked via + * pm_runtime_force_suspend() and pm_runtime_force_resume(), so + * return a direct pointer to the driver callback in that case. 
+ */ + if (dev_pm_strict_midlayer_is_set(dev)) + return __rpm_get_driver_callback(dev, cb_offset); + + return __rpm_get_callback(dev, cb_offset); } +#define GET_CALLBACK(dev, callback) \ + get_callback(dev, offsetof(struct dev_pm_ops, callback)) + /** * pm_runtime_force_suspend - Force a device into suspend state if needed. * @dev: Device to suspend. @@ -1920,10 +2000,6 @@ bool pm_runtime_need_not_resume(struct device *dev) * sure the device is put into low power state and it should only be used during * system-wide PM transitions to sleep states. It assumes that the analogous * pm_runtime_force_resume() will be used to resume the device. - * - * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent - * state where this function has called the ->runtime_suspend callback but the - * PM core marks the driver as runtime active. */ int pm_runtime_force_suspend(struct device *dev) { @@ -1931,10 +2007,10 @@ int pm_runtime_force_suspend(struct device *dev) int ret; pm_runtime_disable(dev); - if (pm_runtime_status_suspended(dev)) + if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume) return 0; - callback = RPM_GET_CALLBACK(dev, runtime_suspend); + callback = GET_CALLBACK(dev, runtime_suspend); dev_pm_enable_wake_irq_check(dev, true); ret = callback ? callback(dev) : 0; @@ -1946,15 +2022,16 @@ int pm_runtime_force_suspend(struct device *dev) /* * If the device can stay in suspend after the system-wide transition * to the working state that will follow, drop the children counter of - * its parent, but set its status to RPM_SUSPENDED anyway in case this - * function will be called again for it in the meantime. + * its parent and the usage counters of its suppliers. Otherwise, set + * power.needs_force_resume to let pm_runtime_force_resume() know that + * the device needs to be taken care of and to prevent this function + * from handling the device again in case the device is passed to it + * once more subsequently. */ - if (pm_runtime_need_not_resume(dev)) { + if (pm_runtime_need_not_resume(dev)) pm_runtime_set_suspended(dev); - } else { - __update_runtime_status(dev, RPM_SUSPENDED); - dev->power.needs_force_resume = 1; - } + else + dev->power.needs_force_resume = true; return 0; @@ -1965,33 +2042,37 @@ err: } EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); +#ifdef CONFIG_PM_SLEEP + /** * pm_runtime_force_resume - Force a device into resume state if needed. * @dev: Device to resume. * - * Prior invoking this function we expect the user to have brought the device - * into low power state by a call to pm_runtime_force_suspend(). Here we reverse - * those actions and bring the device into full power, if it is expected to be - * used on system resume. In the other case, we defer the resume to be managed - * via runtime PM. + * This function expects that either pm_runtime_force_suspend() has put the + * device into a low-power state prior to calling it, or the device had been + * runtime-suspended before the preceding system-wide suspend transition and it + * was left in suspend during that transition. + * + * The actions carried out by pm_runtime_force_suspend(), or by a runtime + * suspend in general, are reversed and the device is brought back into full + * power if it is expected to be used on system resume, which is the case when + * its needs_force_resume flag is set or when its smart_suspend flag is set and + * its runtime PM status is "active". * - * Typically this function may be invoked from a system resume callback. 
+ * In other cases, the resume is deferred to be managed via runtime PM. + * + * Typically, this function may be invoked from a system resume callback. */ int pm_runtime_force_resume(struct device *dev) { int (*callback)(struct device *); int ret = 0; - if (!dev->power.needs_force_resume) + if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) || + pm_runtime_status_suspended(dev))) goto out; - /* - * The value of the parent's children counter is correct already, so - * just update the status of the device. - */ - __update_runtime_status(dev, RPM_ACTIVE); - - callback = RPM_GET_CALLBACK(dev, runtime_resume); + callback = GET_CALLBACK(dev, runtime_resume); dev_pm_disable_wake_irq_check(dev, false); ret = callback ? callback(dev) : 0; @@ -2002,9 +2083,30 @@ int pm_runtime_force_resume(struct device *dev) } pm_runtime_mark_last_busy(dev); + out: - dev->power.needs_force_resume = 0; + /* + * The smart_suspend flag can be cleared here because it is not going + * to be necessary until the next system-wide suspend transition that + * will update it again. + */ + dev->power.smart_suspend = false; + /* + * Also clear needs_force_resume to make this function skip devices that + * have been seen by it once. + */ + dev->power.needs_force_resume = false; + pm_runtime_enable(dev); return ret; } EXPORT_SYMBOL_GPL(pm_runtime_force_resume); + +bool pm_runtime_need_not_resume(struct device *dev) +{ + return atomic_read(&dev->power.usage_count) <= 1 && + (atomic_read(&dev->power.child_count) == 0 || + dev->power.ignore_children); +} + +#endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index f84018125b46..13b31a3adc77 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async); #endif /* CONFIG_PM_ADVANCED_DEBUG */ static struct attribute *power_attrs[] = { -#ifdef CONFIG_PM_ADVANCED_DEBUG -#ifdef CONFIG_PM_SLEEP +#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP) &dev_attr_async.attr, #endif - &dev_attr_runtime_status.attr, - &dev_attr_runtime_usage.attr, - &dev_attr_runtime_active_kids.attr, - &dev_attr_runtime_enabled.attr, -#endif /* CONFIG_PM_ADVANCED_DEBUG */ NULL, }; static const struct attribute_group pm_attr_group = { @@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = { }; static struct attribute *runtime_attrs[] = { -#ifndef CONFIG_PM_ADVANCED_DEBUG &dev_attr_runtime_status.attr, -#endif &dev_attr_control.attr, &dev_attr_runtime_suspended_time.attr, &dev_attr_runtime_active_time.attr, &dev_attr_autosuspend_delay_ms.attr, +#ifdef CONFIG_PM_ADVANCED_DEBUG + &dev_attr_runtime_usage.attr, + &dev_attr_runtime_active_kids.attr, + &dev_attr_runtime_enabled.attr, +#endif NULL, }; static const struct attribute_group pm_runtime_attr_group = { diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 63bf914a4d44..d1283ff1080b 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -77,7 +77,7 @@ static DEFINE_IDA(wakeup_ida); * wakeup_source_create - Create a struct wakeup_source object. * @name: Name of the new wakeup source. */ -struct wakeup_source *wakeup_source_create(const char *name) +static struct wakeup_source *wakeup_source_create(const char *name) { struct wakeup_source *ws; const char *ws_name; @@ -106,7 +106,6 @@ err_name: err_ws: return NULL; } -EXPORT_SYMBOL_GPL(wakeup_source_create); /* * Record wakeup_source statistics being deleted into a dummy wakeup_source. 
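For context, here is a minimal sketch of how a driver could consume the devres-managed runtime PM helpers added to runtime.c above, devm_pm_runtime_set_active_enabled() and devm_pm_runtime_get_noresume(). The foo_* names and the probe structure are illustrative assumptions, not part of this diff:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/*
	 * Mark the device RPM_ACTIVE and enable runtime PM; the matching
	 * pm_runtime_set_suspended() and pm_runtime_disable() calls are
	 * queued as devres actions, so no explicit unwinding is needed on
	 * later probe failure or on driver detach.
	 */
	ret = devm_pm_runtime_set_active_enabled(dev);
	if (ret)
		return ret;

	/*
	 * Hold a usage-count reference for the lifetime of the binding;
	 * the matching pm_runtime_put_noidle() also runs via devres.
	 */
	return devm_pm_runtime_get_noresume(dev);
}

The point of the pairing is that every acquire in probe gets its release registered immediately, so the error paths stay trivial.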
@@ -149,7 +148,7 @@ static void wakeup_source_free(struct wakeup_source *ws) * * Use only for wakeup source objects created with wakeup_source_create(). */ -void wakeup_source_destroy(struct wakeup_source *ws) +static void wakeup_source_destroy(struct wakeup_source *ws) { if (!ws) return; @@ -158,13 +157,12 @@ void wakeup_source_destroy(struct wakeup_source *ws) wakeup_source_record(ws); wakeup_source_free(ws); } -EXPORT_SYMBOL_GPL(wakeup_source_destroy); /** * wakeup_source_add - Add given object to the list of wakeup sources. * @ws: Wakeup source object to add to the list. */ -void wakeup_source_add(struct wakeup_source *ws) +static void wakeup_source_add(struct wakeup_source *ws) { unsigned long flags; @@ -179,13 +177,12 @@ void wakeup_source_add(struct wakeup_source *ws) list_add_rcu(&ws->entry, &wakeup_sources); raw_spin_unlock_irqrestore(&events_lock, flags); } -EXPORT_SYMBOL_GPL(wakeup_source_add); /** * wakeup_source_remove - Remove given object from the wakeup sources list. * @ws: Wakeup source object to remove from the list. */ -void wakeup_source_remove(struct wakeup_source *ws) +static void wakeup_source_remove(struct wakeup_source *ws) { unsigned long flags; @@ -204,7 +201,6 @@ void wakeup_source_remove(struct wakeup_source *ws) */ ws->timer.function = NULL; } -EXPORT_SYMBOL_GPL(wakeup_source_remove); /** * wakeup_source_register - Create wakeup source and add it to the list. @@ -337,7 +333,7 @@ int device_wakeup_enable(struct device *dev) if (!dev || !dev->power.can_wakeup) return -EINVAL; - if (pm_suspend_target_state != PM_SUSPEND_ON) + if (pm_sleep_transition_in_progress()) dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__); ws = wakeup_source_register(dev, dev_name(dev)); @@ -763,7 +759,7 @@ EXPORT_SYMBOL_GPL(pm_relax); */ static void pm_wakeup_timer_fn(struct timer_list *t) { - struct wakeup_source *ws = from_timer(ws, t, timer); + struct wakeup_source *ws = timer_container_of(ws, t, timer); unsigned long flags; spin_lock_irqsave(&ws->lock, flags); diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c index 6732ed2869f9..3ffd427248e8 100644 --- a/drivers/base/power/wakeup_stats.c +++ b/drivers/base/power/wakeup_stats.c @@ -34,6 +34,7 @@ wakeup_attr(active_count); wakeup_attr(event_count); wakeup_attr(wakeup_count); wakeup_attr(expire_count); +wakeup_attr(relax_count); static ssize_t active_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = { &dev_attr_event_count.attr, &dev_attr_wakeup_count.attr, &dev_attr_expire_count.attr, + &dev_attr_relax_count.attr, &dev_attr_active_time_ms.attr, &dev_attr_total_time_ms.attr, &dev_attr_max_time_ms.attr, diff --git a/drivers/base/property.c b/drivers/base/property.c index c1392743df9c..f626d5bbe806 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -928,22 +928,49 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode) EXPORT_SYMBOL_GPL(fwnode_device_is_available); /** - * device_get_child_node_count - return the number of child nodes for device - * @dev: Device to count the child nodes for + * fwnode_get_child_node_count - return the number of child nodes for a given firmware node + * @fwnode: Pointer to the parent firmware node * - * Return: the number of child nodes for a given device. + * Return: the number of child nodes for a given firmware node. 
+ */ +unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode) +{ + struct fwnode_handle *child; + unsigned int count = 0; + + fwnode_for_each_child_node(fwnode, child) + count++; + + return count; +} +EXPORT_SYMBOL_GPL(fwnode_get_child_node_count); + +/** + * fwnode_get_named_child_node_count - number of child nodes with given name + * @fwnode: Node whose child nodes are counted. + * @name: String to match child node name against. + + * + * Scan the child nodes and count all nodes with a matching name. Any unit + * address following the '@' sign in a scanned node name is ignored. + * E.g.:: + * fwnode_get_named_child_node_count(fwnode, "channel"); + * would match all the nodes:: + * channel { }, channel@0 {}, channel@0xabba {}... + * + * Return: the number of child nodes with a matching name for a given firmware node. */ -unsigned int device_get_child_node_count(const struct device *dev) +unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode, + const char *name) { struct fwnode_handle *child; unsigned int count = 0; - device_for_each_child_node(dev, child) + fwnode_for_each_named_child_node(fwnode, child, name) count++; return count; } -EXPORT_SYMBOL_GPL(device_get_child_node_count); +EXPORT_SYMBOL_GPL(fwnode_get_named_child_node_count); bool device_dma_supported(const struct device *dev) { diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index b1affac70d5d..ffb2ef488298 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -6,8 +6,6 @@ config REGMAP bool default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI) - select IRQ_DOMAIN if REGMAP_IRQ - select MDIO_BUS if REGMAP_MDIO help Enable support for the Register Map (regmap) access API. 
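As a usage illustration for the new counting helper (not part of the diff), a probe-time sketch that counts the "channel" children of a device's firmware node; foo_adc_probe and the -ENODEV policy are assumptions:

#include <linux/platform_device.h>
#include <linux/property.h>

static int foo_adc_probe(struct platform_device *pdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
	unsigned int nchannels;

	/* Matches channel {}, channel@0 {} and channel@0xabba {} alike. */
	nchannels = fwnode_get_named_child_node_count(fwnode, "channel");
	if (!nchannels)
		return -ENODEV;

	/* ... allocate one per-channel state structure here ... */
	return 0;
}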
@@ -58,12 +56,14 @@ config REGMAP_W1 config REGMAP_MDIO tristate + select MDIO_BUS config REGMAP_MMIO tristate config REGMAP_IRQ bool + select IRQ_DOMAIN config REGMAP_RAM tristate diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index f7fcf2de1301..c7650fa434ad 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -34,21 +34,10 @@ static int regcache_defaults_cmp(const void *a, const void *b) return 0; } -static void regcache_defaults_swap(void *a, void *b, int size) -{ - struct reg_default *x = a; - struct reg_default *y = b; - struct reg_default tmp; - - tmp = *x; - *x = *y; - *y = tmp; -} - void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults) { sort(defaults, ndefaults, sizeof(*defaults), - regcache_defaults_cmp, regcache_defaults_swap); + regcache_defaults_cmp, NULL); } EXPORT_SYMBOL_GPL(regcache_sort_defaults); diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index fb84cda92a75..c9b4c04b1cf6 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -470,10 +470,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file, if (err) return count; - err = debugfs_file_get(file->f_path.dentry); - if (err) - return err; - map->lock(map->lock_arg); if (new_val && !map->cache_only) { @@ -486,7 +482,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file, map->cache_only = new_val; map->unlock(map->lock_arg); - debugfs_file_put(file->f_path.dentry); if (require_sync) { err = regcache_sync(map); @@ -517,10 +512,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, if (err) return count; - err = debugfs_file_get(file->f_path.dentry); - if (err) - return err; - map->lock(map->lock_arg); if (new_val && !map->cache_bypass) { @@ -532,7 +523,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, map->cache_bypass = new_val; map->unlock(map->lock_arg); - debugfs_file_put(file->f_path.dentry); return count; } diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 6c6869188c31..6112d942499b 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -6,11 +6,13 @@ // // Author: Mark Brown <broonie@opensource.wolfsonmicro.com> +#include <linux/array_size.h> #include <linux/device.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/overflow.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -19,6 +21,7 @@ struct regmap_irq_chip_data { struct mutex lock; + struct lock_class_key lock_key; struct irq_chip irq_chip; struct regmap *map; @@ -33,6 +36,7 @@ struct regmap_irq_chip_data { void *status_reg_buf; unsigned int *main_status_buf; unsigned int *status_buf; + unsigned int *prev_status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; unsigned int *wake_buf; @@ -193,10 +197,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data) /* If we've changed our wakeup count propagate it to the parent */ if (d->wake_count < 0) for (i = d->wake_count; i < 0; i++) - irq_set_irq_wake(d->irq, 0); + disable_irq_wake(d->irq); else if (d->wake_count > 0) for (i = 0; i < d->wake_count; i++) - irq_set_irq_wake(d->irq, 1); + enable_irq_wake(d->irq); d->wake_count = 0; @@ -332,27 +336,13 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, return ret; } -static irqreturn_t regmap_irq_thread(int irq, void *d) 
+static int read_irq_data(struct regmap_irq_chip_data *data) { - struct regmap_irq_chip_data *data = d; const struct regmap_irq_chip *chip = data->chip; struct regmap *map = data->map; int ret, i; - bool handled = false; u32 reg; - if (chip->handle_pre_irq) - chip->handle_pre_irq(chip->irq_drv_data); - - if (chip->runtime_pm) { - ret = pm_runtime_get_sync(map->dev); - if (ret < 0) { - dev_err(map->dev, "IRQ thread failed to resume: %d\n", - ret); - goto exit; - } - } - /* * Read only registers with active IRQs if the chip has 'main status * register'. Else read in the statuses, using a single bulk read if @@ -379,10 +369,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) reg = data->get_irq_reg(data, chip->main_status, i); ret = regmap_read(map, reg, &data->main_status_buf[i]); if (ret) { - dev_err(map->dev, - "Failed to read IRQ status %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status %d\n", ret); + return ret; } } @@ -398,10 +386,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) ret = read_sub_irq_data(data, b); if (ret != 0) { - dev_err(map->dev, - "Failed to read IRQ status %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status %d\n", ret); + return ret; } } @@ -418,9 +404,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_reg_buf, chip->num_regs); if (ret != 0) { - dev_err(map->dev, "Failed to read IRQ status: %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); + return ret; } for (i = 0; i < data->chip->num_regs; i++) { @@ -436,7 +421,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) break; default: BUG(); - goto exit; + return -EIO; } } @@ -447,10 +432,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) ret = regmap_read(map, reg, &data->status_buf[i]); if (ret != 0) { - dev_err(map->dev, - "Failed to read IRQ status: %d\n", - ret); - goto exit; + dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); + return ret; } } } @@ -459,6 +442,42 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) for (i = 0; i < data->chip->num_regs; i++) data->status_buf[i] = ~data->status_buf[i]; + return 0; +} + +static irqreturn_t regmap_irq_thread(int irq, void *d) +{ + struct regmap_irq_chip_data *data = d; + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + int ret, i; + bool handled = false; + u32 reg; + + if (chip->handle_pre_irq) + chip->handle_pre_irq(chip->irq_drv_data); + + if (chip->runtime_pm) { + ret = pm_runtime_get_sync(map->dev); + if (ret < 0) { + dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret); + goto exit; + } + } + + ret = read_irq_data(data); + if (ret < 0) + goto exit; + + if (chip->status_is_level) { + for (i = 0; i < data->chip->num_regs; i++) { + unsigned int val = data->status_buf[i]; + + data->status_buf[i] ^= data->prev_status_buf[i]; + data->prev_status_buf[i] = val; + } + } + /* * Ignore masked IRQs and ack if we need to; we ack early so * there is no race between handling and acknowledging the @@ -705,6 +724,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (!d->status_buf) goto err_alloc; + if (chip->status_is_level) { + d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf), + GFP_KERNEL); + if (!d->prev_status_buf) + goto err_alloc; + } + d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf), GFP_KERNEL); if (!d->mask_buf) @@ -776,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, goto err_alloc; } - 
mutex_init(&d->lock); + /* + * If one regmap-irq is the parent of another then we'll try + * to lock the child with the parent locked, use an explicit + * lock_key so lockdep can figure out what's going on. + */ + lockdep_register_key(&d->lock_key); + mutex_init_with_key(&d->lock, &d->lock_key); for (i = 0; i < chip->num_irqs; i++) d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride] @@ -791,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->mask_buf[i], chip->irq_drv_data); if (ret) - goto err_alloc; + goto err_mutex; } if (chip->mask_base && !chip->handle_mask_sync) { @@ -802,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } @@ -813,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } @@ -830,7 +862,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret != 0) { dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); - goto err_alloc; + goto err_mutex; } } @@ -854,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } } @@ -876,14 +908,24 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret != 0) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } } + /* Store current levels */ + if (chip->status_is_level) { + ret = read_irq_data(d); + if (ret < 0) + goto err_mutex; + + memcpy(d->prev_status_buf, d->status_buf, + array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0]))); + } + ret = regmap_irq_create_domain(fwnode, irq_base, chip, d); if (ret) - goto err_alloc; + goto err_mutex; ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags | IRQF_ONESHOT, @@ -900,6 +942,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, err_domain: /* Should really dispose of the domain but... 
*/ +err_mutex: + mutex_destroy(&d->lock); + lockdep_unregister_key(&d->lock_key); err_alloc: kfree(d->type_buf); kfree(d->type_buf_def); @@ -908,6 +953,7 @@ err_alloc: kfree(d->mask_buf); kfree(d->main_status_buf); kfree(d->status_buf); + kfree(d->prev_status_buf); kfree(d->status_reg_buf); if (d->config_buf) { for (i = 0; i < chip->num_config_bases; i++) @@ -985,11 +1031,14 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) kfree(d->main_status_buf); kfree(d->status_reg_buf); kfree(d->status_buf); + kfree(d->prev_status_buf); if (d->config_buf) { for (i = 0; i < d->chip->num_config_bases; i++) kfree(d->config_buf[i]); kfree(d->config_buf); } + mutex_destroy(&d->lock); + lockdep_unregister_key(&d->lock_key); kfree(d); } EXPORT_SYMBOL_GPL(regmap_del_irq_chip); diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index 64ea340950b6..95c5bf2a78ee 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -736,7 +736,7 @@ static void stride(struct kunit *test) } } -static struct regmap_range_cfg test_range = { +static const struct regmap_range_cfg test_range = { .selector_reg = 1, .selector_mask = 0xff, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f2843f814675..1f3f782a04ba 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1173,6 +1173,8 @@ err_name: err_map: kfree(map); err: + if (bus && bus->free_on_exit) + kfree(bus); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(__regmap_init); diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 5c78fa6ae772..deda7f35a059 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, if (prop->is_inline) return -EINVAL; - if (index * sizeof(*ref) >= prop->length) + if ((index + 1) * sizeof(*ref) > prop->length) return -ENOENT; ref_array = prop->pointer; diff --git a/drivers/base/topology.c b/drivers/base/topology.c index b962da263eee..c890e2a5b428 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -179,7 +179,7 @@ static umode_t topology_is_visible(struct kobject *kobj, static const struct attribute_group topology_attr_group = { .attrs = default_attrs, - .bin_attrs_new = bin_attrs, + .bin_attrs = bin_attrs, .is_visible = topology_is_visible, .name = "topology" }; @@ -208,3 +208,55 @@ static int __init topology_sysfs_init(void) } device_initcall(topology_sysfs_init); + +DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; +EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale); + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) +{ + per_cpu(cpu_scale, cpu) = capacity; +} + +static ssize_t cpu_capacity_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + + return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); +} + +static DEVICE_ATTR_RO(cpu_capacity); + +static int cpu_capacity_sysctl_add(unsigned int cpu) +{ + struct device *cpu_dev = get_cpu_device(cpu); + + if (!cpu_dev) + return -ENOENT; + + device_create_file(cpu_dev, &dev_attr_cpu_capacity); + + return 0; +} + +static int cpu_capacity_sysctl_remove(unsigned int cpu) +{ + struct device *cpu_dev = get_cpu_device(cpu); + + if (!cpu_dev) + return -ENOENT; + + device_remove_file(cpu_dev, &dev_attr_cpu_capacity); + + return 0; +} + +static int register_cpu_capacity_sysctl(void) +{ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 
"topology/cpu-capacity", + cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove); + + return 0; +} +subsys_initcall(register_cpu_capacity_sysctl); |