Diffstat (limited to 'drivers/base')
31 files changed, 2059 insertions, 846 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 6f04b831a5c0..2b8fd6bb7da0 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -230,4 +230,16 @@ config GENERIC_ARCH_NUMA Enable support for generic NUMA implementation. Currently, RISC-V and ARM64 use it. +config FW_DEVLINK_SYNC_STATE_TIMEOUT + bool "sync_state() behavior defaults to timeout instead of strict" + help + This is build time equivalent of adding kernel command line parameter + "fw_devlink.sync_state=timeout". Give up waiting on consumers and + call sync_state() on any devices that haven't yet received their + sync_state() calls after deferred_probe_timeout has expired or by + late_initcall() if !CONFIG_MODULES. You should almost always want to + select N here unless you have already successfully tested with the + command line option on every system/board your kernel is expected to + work on. + endmenu diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index b1c1dd38ab01..b741b5ba82bd 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -835,18 +835,19 @@ void __init init_cpu_topology(void) if (ret) { /* * Discard anything that was parsed if we hit an error so we - * don't use partial information. + * don't use partial information. But do not return yet to give + * arch-specific early cache level detection a chance to run. */ reset_cpu_topology(); - return; } for_each_possible_cpu(cpu) { ret = fetch_cache_info(cpu); - if (ret) { + if (!ret) + continue; + else if (ret != -ENOENT) pr_err("Early cacheinfo failed, ret = %d\n", ret); - break; - } + return; } } diff --git a/drivers/base/base.h b/drivers/base/base.h index 726a12a244c0..eb4c0ace9242 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -27,11 +27,13 @@ * on this bus. * @bus - pointer back to the struct bus_type that this structure is associated * with. + * @dev_root: Default device to use as the parent. * * @glue_dirs - "glue" directory to put in-between the parent device to * avoid namespace conflicts * @class - pointer back to the struct class that this structure is associated * with. + * @lock_key: Lock class key for use by the lock validator * * This structure is the one that is the actual kobject allowing struct * bus_type/class to be statically allocated safely. 
Nothing outside of the @@ -48,10 +50,11 @@ struct subsys_private { struct klist klist_drivers; struct blocking_notifier_head bus_notifier; unsigned int drivers_autoprobe:1; - struct bus_type *bus; + const struct bus_type *bus; + struct device *dev_root; struct kset glue_dirs; - struct class *class; + const struct class *class; struct lock_class_key lock_key; }; @@ -70,6 +73,8 @@ static inline void subsys_put(struct subsys_private *sp) kset_put(&sp->subsys); } +struct subsys_private *class_to_subsys(const struct class *class); + struct driver_private { struct kobject kobj; struct klist klist_devices; @@ -122,69 +127,73 @@ struct device_private { container_of(obj, struct device_private, knode_class) /* initialisation functions */ -extern int devices_init(void); -extern int buses_init(void); -extern int classes_init(void); -extern int firmware_init(void); +int devices_init(void); +int buses_init(void); +int classes_init(void); +int firmware_init(void); #ifdef CONFIG_SYS_HYPERVISOR -extern int hypervisor_init(void); +int hypervisor_init(void); #else static inline int hypervisor_init(void) { return 0; } #endif -extern int platform_bus_init(void); -extern void cpu_dev_init(void); -extern void container_dev_init(void); +int platform_bus_init(void); +void cpu_dev_init(void); +void container_dev_init(void); #ifdef CONFIG_AUXILIARY_BUS -extern void auxiliary_bus_init(void); +void auxiliary_bus_init(void); #else static inline void auxiliary_bus_init(void) { } #endif struct kobject *virtual_device_parent(struct device *dev); -extern int bus_add_device(struct device *dev); -extern void bus_probe_device(struct device *dev); -extern void bus_remove_device(struct device *dev); +int bus_add_device(struct device *dev); +void bus_probe_device(struct device *dev); +void bus_remove_device(struct device *dev); void bus_notify(struct device *dev, enum bus_notifier_event value); bool bus_is_registered(const struct bus_type *bus); -extern int bus_add_driver(struct device_driver *drv); -extern void bus_remove_driver(struct device_driver *drv); -extern void device_release_driver_internal(struct device *dev, - struct device_driver *drv, - struct device *parent); +int bus_add_driver(struct device_driver *drv); +void bus_remove_driver(struct device_driver *drv); +void device_release_driver_internal(struct device *dev, struct device_driver *drv, + struct device *parent); -extern void driver_detach(struct device_driver *drv); -extern void driver_deferred_probe_del(struct device *dev); -extern void device_set_deferred_probe_reason(const struct device *dev, - struct va_format *vaf); +void driver_detach(struct device_driver *drv); +void driver_deferred_probe_del(struct device *dev); +void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf); static inline int driver_match_device(struct device_driver *drv, struct device *dev) { return drv->bus->match ? 
drv->bus->match(dev, drv) : 1; } -extern int driver_add_groups(struct device_driver *drv, - const struct attribute_group **groups); -extern void driver_remove_groups(struct device_driver *drv, - const struct attribute_group **groups); +static inline void dev_sync_state(struct device *dev) +{ + if (dev->bus->sync_state) + dev->bus->sync_state(dev); + else if (dev->driver && dev->driver->sync_state) + dev->driver->sync_state(dev); +} + +int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups); +void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); void device_driver_detach(struct device *dev); -extern int devres_release_all(struct device *dev); -extern void device_block_probing(void); -extern void device_unblock_probing(void); -extern void deferred_probe_extend_timeout(void); -extern void driver_deferred_probe_trigger(void); +int devres_release_all(struct device *dev); +void device_block_probing(void); +void device_unblock_probing(void); +void deferred_probe_extend_timeout(void); +void driver_deferred_probe_trigger(void); const char *device_get_devnode(const struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); /* /sys/devices directory */ extern struct kset *devices_kset; -extern void devices_kset_move_last(struct device *dev); +void devices_kset_move_last(struct device *dev); #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) -extern void module_add_driver(struct module *mod, struct device_driver *drv); -extern void module_remove_driver(struct device_driver *drv); +void module_add_driver(struct module *mod, struct device_driver *drv); +void module_remove_driver(struct device_driver *drv); #else static inline void module_add_driver(struct module *mod, struct device_driver *drv) { } @@ -192,23 +201,34 @@ static inline void module_remove_driver(struct device_driver *drv) { } #endif #ifdef CONFIG_DEVTMPFS -extern int devtmpfs_init(void); +int devtmpfs_init(void); #else static inline int devtmpfs_init(void) { return 0; } #endif +#ifdef CONFIG_BLOCK +extern struct class block_class; +static inline bool is_blockdev(struct device *dev) +{ + return dev->class == &block_class; +} +#else +static inline bool is_blockdev(struct device *dev) { return false; } +#endif + /* Device links support */ -extern int device_links_read_lock(void); -extern void device_links_read_unlock(int idx); -extern int device_links_read_lock_held(void); -extern int device_links_check_suppliers(struct device *dev); -extern void device_links_force_bind(struct device *dev); -extern void device_links_driver_bound(struct device *dev); -extern void device_links_driver_cleanup(struct device *dev); -extern void device_links_no_driver(struct device *dev); -extern bool device_links_busy(struct device *dev); -extern void device_links_unbind_consumers(struct device *dev); -extern void fw_devlink_drivers_done(void); +int device_links_read_lock(void); +void device_links_read_unlock(int idx); +int device_links_read_lock_held(void); +int device_links_check_suppliers(struct device *dev); +void device_links_force_bind(struct device *dev); +void device_links_driver_bound(struct device *dev); +void device_links_driver_cleanup(struct device *dev); +void device_links_no_driver(struct device *dev); +bool device_links_busy(struct device *dev); +void device_links_unbind_consumers(struct device *dev); +void fw_devlink_drivers_done(void); +void fw_devlink_probing_done(void); /* device pm support */ void device_pm_move_to_tail(struct device *dev); diff --git 
a/drivers/base/bus.c b/drivers/base/bus.c index dd4b82d7510f..84a21084d67d 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -84,7 +84,7 @@ done: return sp; } -static struct bus_type *bus_get(struct bus_type *bus) +static const struct bus_type *bus_get(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); @@ -233,7 +233,7 @@ static const struct kset_uevent_ops bus_uevent_ops = { static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count) { - struct bus_type *bus = bus_get(drv->bus); + const struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; @@ -256,7 +256,7 @@ static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store); static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count) { - struct bus_type *bus = bus_get(drv->bus); + const struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; @@ -274,7 +274,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, } static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store); -static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf) +static ssize_t drivers_autoprobe_show(const struct bus_type *bus, char *buf) { struct subsys_private *sp = bus_to_subsys(bus); int ret; @@ -287,7 +287,7 @@ static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf) return ret; } -static ssize_t drivers_autoprobe_store(struct bus_type *bus, +static ssize_t drivers_autoprobe_store(const struct bus_type *bus, const char *buf, size_t count) { struct subsys_private *sp = bus_to_subsys(bus); @@ -304,7 +304,7 @@ static ssize_t drivers_autoprobe_store(struct bus_type *bus, return count; } -static ssize_t drivers_probe_store(struct bus_type *bus, +static ssize_t drivers_probe_store(const struct bus_type *bus, const char *buf, size_t count) { struct device *dev; @@ -769,7 +769,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, * attached and rescan it against existing drivers to see if it matches * any by calling device_attach() for the unbound devices. */ -int bus_rescan_devices(struct bus_type *bus) +int bus_rescan_devices(const struct bus_type *bus) { return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper); } @@ -808,7 +808,7 @@ static void klist_devices_put(struct klist_node *n) put_device(dev); } -static ssize_t bus_uevent_store(struct bus_type *bus, +static ssize_t bus_uevent_store(const struct bus_type *bus, const char *buf, size_t count) { struct subsys_private *sp = bus_to_subsys(bus); @@ -841,7 +841,7 @@ static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL, * infrastructure, then register the children subsystems it has: * the devices and drivers that belong to the subsystem. 
*/ -int bus_register(struct bus_type *bus) +int bus_register(const struct bus_type *bus) { int retval; struct subsys_private *priv; @@ -935,8 +935,8 @@ void bus_unregister(const struct bus_type *bus) return; pr_debug("bus: '%s': unregistering\n", bus->name); - if (bus->dev_root) - device_unregister(bus->dev_root); + if (sp->dev_root) + device_unregister(sp->dev_root); bus_kobj = &sp->subsys.kobj; sysfs_remove_groups(bus_kobj, bus->bus_groups); @@ -1198,6 +1198,7 @@ static int subsys_register(struct bus_type *subsys, const struct attribute_group **groups, struct kobject *parent_of_root) { + struct subsys_private *sp; struct device *dev; int err; @@ -1205,6 +1206,12 @@ static int subsys_register(struct bus_type *subsys, if (err < 0) return err; + sp = bus_to_subsys(subsys); + if (!sp) { + err = -EINVAL; + goto err_sp; + } + dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!dev) { err = -ENOMEM; @@ -1223,7 +1230,8 @@ static int subsys_register(struct bus_type *subsys, if (err < 0) goto err_dev_reg; - subsys->dev_root = dev; + sp->dev_root = dev; + subsys_put(sp); return 0; err_dev_reg: @@ -1232,6 +1240,8 @@ err_dev_reg: err_name: kfree(dev); err_dev: + subsys_put(sp); +err_sp: bus_unregister(subsys); return err; } @@ -1297,7 +1307,7 @@ EXPORT_SYMBOL_GPL(subsys_virtual_register); * from being unregistered or unloaded while the caller is using it. * The caller is responsible for preventing this. */ -struct device_driver *driver_find(const char *name, struct bus_type *bus) +struct device_driver *driver_find(const char *name, const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct kobject *k; @@ -1349,9 +1359,15 @@ bool bus_is_registered(const struct bus_type *bus) */ struct device *bus_get_dev_root(const struct bus_type *bus) { - if (bus) - return get_device(bus->dev_root); - return NULL; + struct subsys_private *sp = bus_to_subsys(bus); + struct device *dev_root; + + if (!sp) + return NULL; + + dev_root = get_device(sp->dev_root); + subsys_put(sp); + return dev_root; } EXPORT_SYMBOL_GPL(bus_get_dev_root); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index f6573c335f4c..bba3482ddeb8 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -14,7 +14,7 @@ #include <linux/cpu.h> #include <linux/device.h> #include <linux/init.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/smp.h> @@ -28,6 +28,9 @@ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo); #define per_cpu_cacheinfo_idx(cpu, idx) \ (per_cpu_cacheinfo(cpu) + (idx)) +/* Set if no cache information is found in DT/ACPI. */ +static bool use_arch_info; + struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) { return ci_cacheinfo(cpu); @@ -38,11 +41,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, { /* * For non DT/ACPI systems, assume unique level 1 caches, - * system-wide shared caches for all other levels. This will be used - * only if arch specific code has not populated shared_cpu_map + * system-wide shared caches for all other levels. 
*/ - if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI))) - return !(this_leaf->level == 1); + if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) || + use_arch_info) + return (this_leaf->level != 1) && (sib_leaf->level != 1); if ((sib_leaf->attributes & CACHE_ID) && (this_leaf->attributes & CACHE_ID)) @@ -79,6 +82,9 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y) } #ifdef CONFIG_OF + +static bool of_check_cache_nodes(struct device_node *np); + /* OF properties to query for a given cache type */ struct cache_type_info { const char *size_prop; @@ -206,6 +212,11 @@ static int cache_setup_of_node(unsigned int cpu) return -ENOENT; } + if (!of_check_cache_nodes(np)) { + of_node_put(np); + return -ENOENT; + } + prev = np; while (index < cache_leaves(cpu)) { @@ -230,6 +241,25 @@ static int cache_setup_of_node(unsigned int cpu) return 0; } +static bool of_check_cache_nodes(struct device_node *np) +{ + struct device_node *next; + + if (of_property_present(np, "cache-size") || + of_property_present(np, "i-cache-size") || + of_property_present(np, "d-cache-size") || + of_property_present(np, "cache-unified")) + return true; + + next = of_find_next_cache_node(np); + if (next) { + of_node_put(next); + return true; + } + + return false; +} + static int of_count_cache_leaves(struct device_node *np) { unsigned int leaves = 0; @@ -261,6 +291,11 @@ int init_of_cache_level(unsigned int cpu) struct device_node *prev = NULL; unsigned int levels = 0, leaves, level; + if (!of_check_cache_nodes(np)) { + of_node_put(np); + return -ENOENT; + } + leaves = of_count_cache_leaves(np); if (leaves > 0) levels = 1; @@ -312,6 +347,10 @@ static int cache_setup_properties(unsigned int cpu) else if (!acpi_disabled) ret = cache_setup_acpi(cpu); + // Assume there is no cache information available in DT/ACPI from now. 
+ if (ret && use_arch_cache_info()) + use_arch_info = true; + return ret; } @@ -330,7 +369,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) * to update the shared cpu_map if the cache attributes were * populated early before all the cpus are brought online */ - if (!last_level_cache_is_valid(cpu)) { + if (!last_level_cache_is_valid(cpu) && !use_arch_info) { ret = cache_setup_properties(cpu); if (ret) return ret; @@ -398,6 +437,11 @@ static void free_cache_attributes(unsigned int cpu) cache_shared_cpu_map_remove(cpu); } +int __weak early_cache_level(unsigned int cpu) +{ + return -ENOENT; +} + int __weak init_cache_level(unsigned int cpu) { return -ENOENT; @@ -423,63 +467,95 @@ int allocate_cache_info(int cpu) int fetch_cache_info(unsigned int cpu) { - struct cpu_cacheinfo *this_cpu_ci; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); unsigned int levels = 0, split_levels = 0; int ret; if (acpi_disabled) { ret = init_of_cache_level(cpu); - if (ret < 0) - return ret; } else { ret = acpi_get_cache_info(cpu, &levels, &split_levels); - if (ret < 0) + if (!ret) { + this_cpu_ci->num_levels = levels; + /* + * This assumes that: + * - there cannot be any split caches (data/instruction) + * above a unified cache + * - data/instruction caches come by pair + */ + this_cpu_ci->num_leaves = levels + split_levels; + } + } + + if (ret || !cache_leaves(cpu)) { + ret = early_cache_level(cpu); + if (ret) return ret; - this_cpu_ci = get_cpu_cacheinfo(cpu); - this_cpu_ci->num_levels = levels; - /* - * This assumes that: - * - there cannot be any split caches (data/instruction) - * above a unified cache - * - data/instruction caches come by pair - */ - this_cpu_ci->num_leaves = levels + split_levels; + if (!cache_leaves(cpu)) + return -ENOENT; + + this_cpu_ci->early_ci_levels = true; } - if (!cache_leaves(cpu)) - return -ENOENT; return allocate_cache_info(cpu); } -int detect_cache_attributes(unsigned int cpu) +static inline int init_level_allocate_ci(unsigned int cpu) { - int ret; + unsigned int early_leaves = cache_leaves(cpu); /* Since early initialization/allocation of the cacheinfo is allowed * via fetch_cache_info() and this also gets called as CPU hotplug * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped * as it will happen only once (the cacheinfo memory is never freed). - * Just populate the cacheinfo. + * Just populate the cacheinfo. However, if the cacheinfo has been + * allocated early through the arch-specific early_cache_level() call, + * there is a chance the info is wrong (this can happen on arm64). In + * that case, call init_cache_level() anyway to give the arch-specific + * code a chance to make things right. */ - if (per_cpu_cacheinfo(cpu)) - goto populate_leaves; + if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels) + return 0; if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT; - ret = allocate_cache_info(cpu); + /* + * Now that we have properly initialized the cache level info, make + * sure we don't try to do that again the next time we are called + * (e.g. as CPU hotplug callbacks). 
+ */ + ci_cacheinfo(cpu)->early_ci_levels = false; + + if (cache_leaves(cpu) <= early_leaves) + return 0; + + kfree(per_cpu_cacheinfo(cpu)); + return allocate_cache_info(cpu); +} + +int detect_cache_attributes(unsigned int cpu) +{ + int ret; + + ret = init_level_allocate_ci(cpu); if (ret) return ret; -populate_leaves: /* - * populate_cache_leaves() may completely setup the cache leaves and - * shared_cpu_map or it may leave it partially setup. + * If LLC is valid the cache leaves were already populated so just go to + * update the cpu map. */ - ret = populate_cache_leaves(cpu); - if (ret) - goto free_ci; + if (!last_level_cache_is_valid(cpu)) { + /* + * populate_cache_leaves() may completely setup the cache leaves and + * shared_cpu_map or it may leave it partially setup. + */ + ret = populate_cache_leaves(cpu); + if (ret) + goto free_ci; + } /* * For systems using DT for cache hierarchy, fw_token diff --git a/drivers/base/class.c b/drivers/base/class.c index 2373b3e210d8..ac1808d1a2e8 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -20,8 +20,52 @@ #include <linux/mutex.h> #include "base.h" +/* /sys/class */ +static struct kset *class_kset; + #define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr) +/** + * class_to_subsys - Turn a struct class into a struct subsys_private + * + * @class: pointer to the struct bus_type to look up + * + * The driver core internals need to work on the subsys_private structure, not + * the external struct class pointer. This function walks the list of + * registered classes in the system and finds the matching one and returns the + * internal struct subsys_private that relates to that class. + * + * Note, the reference count of the return value is INCREMENTED if it is not + * NULL. A call to subsys_put() must be done when finished with the pointer in + * order for it to be properly freed. 
+ */ +struct subsys_private *class_to_subsys(const struct class *class) +{ + struct subsys_private *sp = NULL; + struct kobject *kobj; + + if (!class || !class_kset) + return NULL; + + spin_lock(&class_kset->list_lock); + + if (list_empty(&class_kset->list)) + goto done; + + list_for_each_entry(kobj, &class_kset->list, entry) { + struct kset *kset = container_of(kobj, struct kset, kobj); + + sp = container_of_const(kset, struct subsys_private, subsys); + if (sp->class == class) + goto done; + } + sp = NULL; +done: + sp = subsys_get(sp); + spin_unlock(&class_kset->list_lock); + return sp; +} + static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { @@ -49,25 +93,24 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr, static void class_release(struct kobject *kobj) { struct subsys_private *cp = to_subsys_private(kobj); - struct class *class = cp->class; + const struct class *class = cp->class; pr_debug("class '%s': release.\n", class->name); - class->p = NULL; - if (class->class_release) class->class_release(class); else pr_debug("class '%s' does not have a release() function, " "be careful\n", class->name); + lockdep_unregister_key(&cp->lock_key); kfree(cp); } static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj) { const struct subsys_private *cp = to_subsys_private(kobj); - struct class *class = cp->class; + const struct class *class = cp->class; return class->ns_type; } @@ -83,44 +126,34 @@ static const struct kobj_type class_ktype = { .child_ns_type = class_child_ns_type, }; -/* Hotplug events for classes go to the class subsys */ -static struct kset *class_kset; - - -int class_create_file_ns(struct class *cls, const struct class_attribute *attr, +int class_create_file_ns(const struct class *cls, const struct class_attribute *attr, const void *ns) { + struct subsys_private *sp = class_to_subsys(cls); int error; - if (cls) - error = sysfs_create_file_ns(&cls->p->subsys.kobj, - &attr->attr, ns); - else - error = -EINVAL; + if (!sp) + return -EINVAL; + + error = sysfs_create_file_ns(&sp->subsys.kobj, &attr->attr, ns); + subsys_put(sp); + return error; } EXPORT_SYMBOL_GPL(class_create_file_ns); -void class_remove_file_ns(struct class *cls, const struct class_attribute *attr, +void class_remove_file_ns(const struct class *cls, const struct class_attribute *attr, const void *ns) { - if (cls) - sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns); -} -EXPORT_SYMBOL_GPL(class_remove_file_ns); + struct subsys_private *sp = class_to_subsys(cls); -static struct class *class_get(struct class *cls) -{ - if (cls) - kset_get(&cls->p->subsys); - return cls; -} + if (!sp) + return; -static void class_put(struct class *cls) -{ - if (cls) - kset_put(&cls->p->subsys); + sysfs_remove_file_ns(&sp->subsys.kobj, &attr->attr, ns); + subsys_put(sp); } +EXPORT_SYMBOL_GPL(class_remove_file_ns); static struct device *klist_class_to_dev(struct klist_node *n) { @@ -142,21 +175,10 @@ static void klist_class_dev_put(struct klist_node *n) put_device(dev); } -static int class_add_groups(struct class *cls, - const struct attribute_group **groups) -{ - return sysfs_create_groups(&cls->p->subsys.kobj, groups); -} - -static void class_remove_groups(struct class *cls, - const struct attribute_group **groups) -{ - return sysfs_remove_groups(&cls->p->subsys.kobj, groups); -} - -int __class_register(struct class *cls, struct lock_class_key *key) +int class_register(const struct class *cls) { struct subsys_private *cp; + 
struct lock_class_key *key; int error; pr_debug("device class '%s': registering\n", cls->name); @@ -167,6 +189,8 @@ int __class_register(struct class *cls, struct lock_class_key *key) klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put); INIT_LIST_HEAD(&cp->interfaces); kset_init(&cp->glue_dirs); + key = &cp->lock_key; + lockdep_register_key(key); __mutex_init(&cp->mutex, "subsys mutex", key); error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name); if (error) { @@ -174,27 +198,15 @@ int __class_register(struct class *cls, struct lock_class_key *key) return error; } - /* set the default /sys/dev directory for devices of this class */ - if (!cls->dev_kobj) - cls->dev_kobj = sysfs_dev_char_kobj; - -#if defined(CONFIG_BLOCK) - /* let the block class directory show up in the root of sysfs */ - if (!sysfs_deprecated || cls != &block_class) - cp->subsys.kobj.kset = class_kset; -#else cp->subsys.kobj.kset = class_kset; -#endif cp->subsys.kobj.ktype = &class_ktype; cp->class = cls; - cls->p = cp; error = kset_register(&cp->subsys); if (error) goto err_out; - error = class_add_groups(class_get(cls), cls->class_groups); - class_put(cls); + error = sysfs_create_groups(&cp->subsys.kobj, cls->class_groups); if (error) { kobject_del(&cp->subsys.kobj); kfree_const(cp->subsys.kobj.name); @@ -204,30 +216,34 @@ int __class_register(struct class *cls, struct lock_class_key *key) err_out: kfree(cp); - cls->p = NULL; return error; } -EXPORT_SYMBOL_GPL(__class_register); +EXPORT_SYMBOL_GPL(class_register); -void class_unregister(struct class *cls) +void class_unregister(const struct class *cls) { + struct subsys_private *sp = class_to_subsys(cls); + + if (!sp) + return; + pr_debug("device class '%s': unregistering\n", cls->name); - class_remove_groups(cls, cls->class_groups); - kset_unregister(&cls->p->subsys); + + sysfs_remove_groups(&sp->subsys.kobj, cls->class_groups); + kset_unregister(&sp->subsys); + subsys_put(sp); } EXPORT_SYMBOL_GPL(class_unregister); -static void class_create_release(struct class *cls) +static void class_create_release(const struct class *cls) { pr_debug("%s called for %s\n", __func__, cls->name); kfree(cls); } /** - * __class_create - create a struct class structure - * @owner: pointer to the module that is to "own" this struct class + * class_create - create a struct class structure * @name: pointer to a string for the name of this class. - * @key: the lock_class_key for this class; used by mutex lock debugging * * This is used to create a struct class pointer that can then be used * in calls to device_create(). @@ -237,8 +253,7 @@ static void class_create_release(struct class *cls) * Note, the pointer created here is to be destroyed when finished by * making a call to class_destroy(). */ -struct class *__class_create(struct module *owner, const char *name, - struct lock_class_key *key) +struct class *class_create(const char *name) { struct class *cls; int retval; @@ -250,10 +265,9 @@ struct class *__class_create(struct module *owner, const char *name, } cls->name = name; - cls->owner = owner; cls->class_release = class_create_release; - retval = __class_register(cls, key); + retval = class_register(cls); if (retval) goto error; @@ -263,7 +277,7 @@ error: kfree(cls); return ERR_PTR(retval); } -EXPORT_SYMBOL_GPL(__class_create); +EXPORT_SYMBOL_GPL(class_create); /** * class_destroy - destroys a struct class structure @@ -272,7 +286,7 @@ EXPORT_SYMBOL_GPL(__class_create); * Note, the pointer to be destroyed must have been created with a call * to class_create(). 
*/ -void class_destroy(struct class *cls) +void class_destroy(const struct class *cls) { if (IS_ERR_OR_NULL(cls)) return; @@ -293,14 +307,18 @@ EXPORT_SYMBOL_GPL(class_destroy); * otherwise if it is NULL, the iteration starts at the beginning of * the list. */ -void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, - struct device *start, const struct device_type *type) +void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class, + const struct device *start, const struct device_type *type) { + struct subsys_private *sp = class_to_subsys(class); struct klist_node *start_knode = NULL; + if (!sp) + return; + if (start) start_knode = &start->p->knode_class; - klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); + klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode); iter->type = type; } EXPORT_SYMBOL_GPL(class_dev_iter_init); @@ -364,16 +382,17 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit); * @fn is allowed to do anything including calling back into class * code. There's no locking restriction. */ -int class_for_each_device(struct class *class, struct device *start, +int class_for_each_device(const struct class *class, const struct device *start, void *data, int (*fn)(struct device *, void *)) { + struct subsys_private *sp = class_to_subsys(class); struct class_dev_iter iter; struct device *dev; int error = 0; if (!class) return -EINVAL; - if (!class->p) { + if (!sp) { WARN(1, "%s called for class '%s' before it was initialized", __func__, class->name); return -EINVAL; @@ -386,6 +405,7 @@ int class_for_each_device(struct class *class, struct device *start, break; } class_dev_iter_exit(&iter); + subsys_put(sp); return error; } @@ -411,16 +431,17 @@ EXPORT_SYMBOL_GPL(class_for_each_device); * @match is allowed to do anything including calling back into class * code. There's no locking restriction. 
*/ -struct device *class_find_device(struct class *class, struct device *start, +struct device *class_find_device(const struct class *class, const struct device *start, const void *data, int (*match)(struct device *, const void *)) { + struct subsys_private *sp = class_to_subsys(class); struct class_dev_iter iter; struct device *dev; if (!class) return NULL; - if (!class->p) { + if (!sp) { WARN(1, "%s called for class '%s' before it was initialized", __func__, class->name); return NULL; @@ -434,6 +455,7 @@ struct device *class_find_device(struct class *class, struct device *start, } } class_dev_iter_exit(&iter); + subsys_put(sp); return dev; } @@ -441,26 +463,33 @@ EXPORT_SYMBOL_GPL(class_find_device); int class_interface_register(struct class_interface *class_intf) { - struct class *parent; + struct subsys_private *sp; + const struct class *parent; struct class_dev_iter iter; struct device *dev; if (!class_intf || !class_intf->class) return -ENODEV; - parent = class_get(class_intf->class); - if (!parent) + parent = class_intf->class; + sp = class_to_subsys(parent); + if (!sp) return -EINVAL; - mutex_lock(&parent->p->mutex); - list_add_tail(&class_intf->node, &parent->p->interfaces); + /* + * Reference in sp is now incremented and will be dropped when + * the interface is removed in the call to class_interface_unregister() + */ + + mutex_lock(&sp->mutex); + list_add_tail(&class_intf->node, &sp->interfaces); if (class_intf->add_dev) { class_dev_iter_init(&iter, parent, NULL, NULL); while ((dev = class_dev_iter_next(&iter))) - class_intf->add_dev(dev, class_intf); + class_intf->add_dev(dev); class_dev_iter_exit(&iter); } - mutex_unlock(&parent->p->mutex); + mutex_unlock(&sp->mutex); return 0; } @@ -468,29 +497,40 @@ EXPORT_SYMBOL_GPL(class_interface_register); void class_interface_unregister(struct class_interface *class_intf) { - struct class *parent = class_intf->class; + struct subsys_private *sp; + const struct class *parent = class_intf->class; struct class_dev_iter iter; struct device *dev; if (!parent) return; - mutex_lock(&parent->p->mutex); + sp = class_to_subsys(parent); + if (!sp) + return; + + mutex_lock(&sp->mutex); list_del_init(&class_intf->node); if (class_intf->remove_dev) { class_dev_iter_init(&iter, parent, NULL, NULL); while ((dev = class_dev_iter_next(&iter))) - class_intf->remove_dev(dev, class_intf); + class_intf->remove_dev(dev); class_dev_iter_exit(&iter); } - mutex_unlock(&parent->p->mutex); + mutex_unlock(&sp->mutex); - class_put(parent); + /* + * Decrement the reference count twice, once for the class_to_subsys() + * call in the start of this function, and the second one from the + * reference increment in class_interface_register() + */ + subsys_put(sp); + subsys_put(sp); } EXPORT_SYMBOL_GPL(class_interface_unregister); -ssize_t show_class_attr_string(struct class *class, - struct class_attribute *attr, char *buf) +ssize_t show_class_attr_string(const struct class *class, + const struct class_attribute *attr, char *buf) { struct class_attribute_string *cs; @@ -587,6 +627,31 @@ void class_compat_remove_link(struct class_compat *cls, struct device *dev, } EXPORT_SYMBOL_GPL(class_compat_remove_link); +/** + * class_is_registered - determine if at this moment in time, a class is + * registered in the driver core or not. + * @class: the class to check + * + * Returns a boolean to state if the class is registered in the driver core + * or not. 
Note that the value could switch right after this call is made, + * so only use this in places where you "know" it is safe to do so (usually + * to determine if the specific class has been registered yet or not). + * + * Be careful in using this. + */ +bool class_is_registered(const struct class *class) +{ + struct subsys_private *sp = class_to_subsys(class); + bool is_initialized = false; + + if (sp) { + is_initialized = true; + subsys_put(sp); + } + return is_initialized; +} +EXPORT_SYMBOL_GPL(class_is_registered); + int __init classes_init(void) { class_kset = kset_create_and_add("class", NULL, NULL); diff --git a/drivers/base/core.c b/drivers/base/core.c index 6878dfcbf0d6..3dff5037943e 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -36,19 +36,6 @@ #include "physical_location.h" #include "power/power.h" -#ifdef CONFIG_SYSFS_DEPRECATED -#ifdef CONFIG_SYSFS_DEPRECATED_V2 -long sysfs_deprecated = 1; -#else -long sysfs_deprecated = 0; -#endif -static int __init sysfs_deprecated_setup(char *arg) -{ - return kstrtol(arg, 10, &sysfs_deprecated); -} -early_param("sysfs.deprecated", sysfs_deprecated_setup); -#endif - /* Device links support. */ static LIST_HEAD(deferred_sync); static unsigned int defer_sync_state_count = 1; @@ -550,13 +537,11 @@ static void devlink_dev_release(struct device *dev) static struct class devlink_class = { .name = "devlink", - .owner = THIS_MODULE, .dev_groups = devlink_groups, .dev_release = devlink_dev_release, }; -static int devlink_add_symlinks(struct device *dev, - struct class_interface *class_intf) +static int devlink_add_symlinks(struct device *dev) { int ret; size_t len; @@ -605,8 +590,7 @@ out: return ret; } -static void devlink_remove_symlinks(struct device *dev, - struct class_interface *class_intf) +static void devlink_remove_symlinks(struct device *dev) { struct device_link *link = to_devlink(dev); size_t len; @@ -1173,10 +1157,7 @@ static void device_links_flush_sync_list(struct list_head *list, if (dev != dont_lock_dev) device_lock(dev); - if (dev->bus->sync_state) - dev->bus->sync_state(dev); - else if (dev->driver && dev->driver->sync_state) - dev->driver->sync_state(dev); + dev_sync_state(dev); if (dev != dont_lock_dev) device_unlock(dev); @@ -1685,6 +1666,31 @@ static int __init fw_devlink_strict_setup(char *arg) } early_param("fw_devlink.strict", fw_devlink_strict_setup); +#define FW_DEVLINK_SYNC_STATE_STRICT 0 +#define FW_DEVLINK_SYNC_STATE_TIMEOUT 1 + +#ifndef CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT +static int fw_devlink_sync_state; +#else +static int fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT; +#endif + +static int __init fw_devlink_sync_state_setup(char *arg) +{ + if (!arg) + return -EINVAL; + + if (strcmp(arg, "strict") == 0) { + fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_STRICT; + return 0; + } else if (strcmp(arg, "timeout") == 0) { + fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT; + return 0; + } + return -EINVAL; +} +early_param("fw_devlink.sync_state", fw_devlink_sync_state_setup); + static inline u32 fw_devlink_get_flags(u8 fwlink_flags) { if (fwlink_flags & FWLINK_FLAG_CYCLE) @@ -1755,6 +1761,44 @@ void fw_devlink_drivers_done(void) device_links_write_unlock(); } +static int fw_devlink_dev_sync_state(struct device *dev, void *data) +{ + struct device_link *link = to_devlink(dev); + struct device *sup = link->supplier; + + if (!(link->flags & DL_FLAG_MANAGED) || + link->status == DL_STATE_ACTIVE || sup->state_synced || + !dev_has_sync_state(sup)) + return 0; + + if (fw_devlink_sync_state == 
FW_DEVLINK_SYNC_STATE_STRICT) { + dev_warn(sup, "sync_state() pending due to %s\n", + dev_name(link->consumer)); + return 0; + } + + if (!list_empty(&sup->links.defer_sync)) + return 0; + + dev_warn(sup, "Timed out. Forcing sync_state()\n"); + sup->state_synced = true; + get_device(sup); + list_add_tail(&sup->links.defer_sync, data); + + return 0; +} + +void fw_devlink_probing_done(void) +{ + LIST_HEAD(sync_list); + + device_links_write_lock(); + class_for_each_device(&devlink_class, NULL, &sync_list, + fw_devlink_dev_sync_state); + device_links_write_unlock(); + device_links_flush_sync_list(&sync_list, NULL); +} + /** * wait_for_init_devices_probe - Try to probe any device needed for init * @@ -2209,8 +2253,12 @@ static void fw_devlink_link_device(struct device *dev) int (*platform_notify)(struct device *dev) = NULL; int (*platform_notify_remove)(struct device *dev) = NULL; static struct kobject *dev_kobj; -struct kobject *sysfs_dev_char_kobj; -struct kobject *sysfs_dev_block_kobj; + +/* /sys/dev/char */ +static struct kobject *sysfs_dev_char_kobj; + +/* /sys/dev/block */ +static struct kobject *sysfs_dev_block_kobj; static DEFINE_MUTEX(device_hotplug_lock); @@ -2779,7 +2827,7 @@ EXPORT_SYMBOL_GPL(devm_device_add_groups); static int device_add_attrs(struct device *dev) { - struct class *class = dev->class; + const struct class *class = dev->class; const struct device_type *type = dev->type; int error; @@ -2846,7 +2894,7 @@ static int device_add_attrs(struct device *dev) static void device_remove_attrs(struct device *dev) { - struct class *class = dev->class; + const struct class *class = dev->class; const struct device_type *type = dev->type; if (dev->physical_location) { @@ -3079,7 +3127,7 @@ struct kobject *virtual_device_parent(struct device *dev) struct class_dir { struct kobject kobj; - struct class *class; + const struct class *class; }; #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) @@ -3103,8 +3151,8 @@ static const struct kobj_type class_dir_ktype = { .child_ns_type = class_dir_child_ns_type }; -static struct kobject * -class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) +static struct kobject *class_dir_create_and_add(struct subsys_private *sp, + struct kobject *parent_kobj) { struct class_dir *dir; int retval; @@ -3113,12 +3161,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) if (!dir) return ERR_PTR(-ENOMEM); - dir->class = class; + dir->class = sp->class; kobject_init(&dir->kobj, &class_dir_ktype); - dir->kobj.kset = &class->p->glue_dirs; + dir->kobj.kset = &sp->glue_dirs; - retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); + retval = kobject_add(&dir->kobj, parent_kobj, "%s", sp->class->name); if (retval < 0) { kobject_put(&dir->kobj); return ERR_PTR(retval); @@ -3131,21 +3179,13 @@ static DEFINE_MUTEX(gdp_mutex); static struct kobject *get_device_parent(struct device *dev, struct device *parent) { + struct subsys_private *sp = class_to_subsys(dev->class); struct kobject *kobj = NULL; - if (dev->class) { + if (sp) { struct kobject *parent_kobj; struct kobject *k; -#ifdef CONFIG_BLOCK - /* block disks show up in /sys/block */ - if (sysfs_deprecated && dev->class == &block_class) { - if (parent && parent->class == &block_class) - return &parent->kobj; - return &block_class.p->subsys.kobj; - } -#endif - /* * If we have no parent, we live in "virtual". 
* Class-devices with a non class-device as parent, live @@ -3153,30 +3193,34 @@ static struct kobject *get_device_parent(struct device *dev, */ if (parent == NULL) parent_kobj = virtual_device_parent(dev); - else if (parent->class && !dev->class->ns_type) + else if (parent->class && !dev->class->ns_type) { + subsys_put(sp); return &parent->kobj; - else + } else { parent_kobj = &parent->kobj; + } mutex_lock(&gdp_mutex); /* find our class-directory at the parent and reference it */ - spin_lock(&dev->class->p->glue_dirs.list_lock); - list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) + spin_lock(&sp->glue_dirs.list_lock); + list_for_each_entry(k, &sp->glue_dirs.list, entry) if (k->parent == parent_kobj) { kobj = kobject_get(k); break; } - spin_unlock(&dev->class->p->glue_dirs.list_lock); + spin_unlock(&sp->glue_dirs.list_lock); if (kobj) { mutex_unlock(&gdp_mutex); + subsys_put(sp); return kobj; } /* or create a new class-directory at the parent device */ - k = class_dir_create_and_add(dev->class, parent_kobj); + k = class_dir_create_and_add(sp, parent_kobj); /* do not emit an uevent for this simple "glue" directory */ mutex_unlock(&gdp_mutex); + subsys_put(sp); return k; } @@ -3199,10 +3243,23 @@ static struct kobject *get_device_parent(struct device *dev, static inline bool live_in_glue_dir(struct kobject *kobj, struct device *dev) { - if (!kobj || !dev->class || - kobj->kset != &dev->class->p->glue_dirs) + struct subsys_private *sp; + bool retval; + + if (!kobj || !dev->class) return false; - return true; + + sp = class_to_subsys(dev->class); + if (!sp) + return false; + + if (kobj->kset == &sp->glue_dirs) + retval = true; + else + retval = false; + + subsys_put(sp); + return retval; } static inline struct kobject *get_glue_dir(struct device *dev) @@ -3299,6 +3356,7 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) static int device_add_class_symlinks(struct device *dev) { struct device_node *of_node = dev_of_node(dev); + struct subsys_private *sp; int error; if (of_node) { @@ -3308,12 +3366,11 @@ static int device_add_class_symlinks(struct device *dev) /* An error here doesn't warrant bringing down the device */ } - if (!dev->class) + sp = class_to_subsys(dev->class); + if (!sp) return 0; - error = sysfs_create_link(&dev->kobj, - &dev->class->p->subsys.kobj, - "subsystem"); + error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem"); if (error) goto out_devnode; @@ -3324,46 +3381,38 @@ static int device_add_class_symlinks(struct device *dev) goto out_subsys; } -#ifdef CONFIG_BLOCK - /* /sys/block has directories and does not need symlinks */ - if (sysfs_deprecated && dev->class == &block_class) - return 0; -#endif - /* link in the class directory pointing to the device */ - error = sysfs_create_link(&dev->class->p->subsys.kobj, - &dev->kobj, dev_name(dev)); + error = sysfs_create_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev)); if (error) goto out_device; - - return 0; + goto exit; out_device: sysfs_remove_link(&dev->kobj, "device"); - out_subsys: sysfs_remove_link(&dev->kobj, "subsystem"); out_devnode: sysfs_remove_link(&dev->kobj, "of_node"); +exit: + subsys_put(sp); return error; } static void device_remove_class_symlinks(struct device *dev) { + struct subsys_private *sp = class_to_subsys(dev->class); + if (dev_of_node(dev)) sysfs_remove_link(&dev->kobj, "of_node"); - if (!dev->class) + if (!sp) return; if (dev->parent && device_is_not_partition(dev)) sysfs_remove_link(&dev->kobj, "device"); sysfs_remove_link(&dev->kobj, "subsystem"); 
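A note on the pattern running through this file: direct dev->class->p dereferences are replaced with a class_to_subsys() lookup that takes a reference on the internal subsys_private structure. A minimal sketch of the expected calling convention (the function name and error value here are illustrative, not part of the patch):

	static int example_class_op(struct device *dev)
	{
		struct subsys_private *sp = class_to_subsys(dev->class);

		if (!sp)	/* NULL class, or class not registered */
			return -EINVAL;

		/* sp->subsys.kobj, sp->klist_devices, sp->mutex are now safe to use */

		subsys_put(sp);	/* drop the reference class_to_subsys() took */
		return 0;
	}

Every early return between the lookup and subsys_put() must drop the reference itself, which is why several hunks in this file grow matching subsys_put() calls on their error paths.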
-#ifdef CONFIG_BLOCK - if (sysfs_deprecated && dev->class == &block_class) - return; -#endif - sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev)); + sysfs_delete_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev)); + subsys_put(sp); } /** @@ -3383,27 +3432,13 @@ int dev_set_name(struct device *dev, const char *fmt, ...) } EXPORT_SYMBOL_GPL(dev_set_name); -/** - * device_to_dev_kobj - select a /sys/dev/ directory for the device - * @dev: device - * - * By default we select char/ for new entries. Setting class->dev_obj - * to NULL prevents an entry from being created. class->dev_kobj must - * be set (or cleared) before any devices are registered to the class - * otherwise device_create_sys_dev_entry() and - * device_remove_sys_dev_entry() will disagree about the presence of - * the link. - */ +/* select a /sys/dev/ directory for the device */ static struct kobject *device_to_dev_kobj(struct device *dev) { - struct kobject *kobj; - - if (dev->class) - kobj = dev->class->dev_kobj; + if (is_blockdev(dev)) + return sysfs_dev_block_kobj; else - kobj = sysfs_dev_char_kobj; - - return kobj; + return sysfs_dev_char_kobj; } static int device_create_sys_dev_entry(struct device *dev) @@ -3472,6 +3507,7 @@ static int device_private_init(struct device *dev) */ int device_add(struct device *dev) { + struct subsys_private *sp; struct device *parent; struct kobject *kobj; struct class_interface *class_intf; @@ -3600,18 +3636,18 @@ int device_add(struct device *dev) klist_add_tail(&dev->p->knode_parent, &parent->p->klist_children); - if (dev->class) { - mutex_lock(&dev->class->p->mutex); + sp = class_to_subsys(dev->class); + if (sp) { + mutex_lock(&sp->mutex); /* tie the class to the device */ - klist_add_tail(&dev->p->knode_class, - &dev->class->p->klist_devices); + klist_add_tail(&dev->p->knode_class, &sp->klist_devices); /* notify any interfaces that the device is here */ - list_for_each_entry(class_intf, - &dev->class->p->interfaces, node) + list_for_each_entry(class_intf, &sp->interfaces, node) if (class_intf->add_dev) - class_intf->add_dev(dev, class_intf); - mutex_unlock(&dev->class->p->mutex); + class_intf->add_dev(dev); + mutex_unlock(&sp->mutex); + subsys_put(sp); } done: put_device(dev); @@ -3731,6 +3767,7 @@ EXPORT_SYMBOL_GPL(kill_device); */ void device_del(struct device *dev) { + struct subsys_private *sp; struct device *parent = dev->parent; struct kobject *glue_dir = NULL; struct class_interface *class_intf; @@ -3757,18 +3794,20 @@ void device_del(struct device *dev) device_remove_sys_dev_entry(dev); device_remove_file(dev, &dev_attr_dev); } - if (dev->class) { + + sp = class_to_subsys(dev->class); + if (sp) { device_remove_class_symlinks(dev); - mutex_lock(&dev->class->p->mutex); + mutex_lock(&sp->mutex); /* notify any interfaces that the device is now gone */ - list_for_each_entry(class_intf, - &dev->class->p->interfaces, node) + list_for_each_entry(class_intf, &sp->interfaces, node) if (class_intf->remove_dev) - class_intf->remove_dev(dev, class_intf); + class_intf->remove_dev(dev); /* remove the device from the class list */ klist_del(&dev->p->knode_class); - mutex_unlock(&dev->class->p->mutex); + mutex_unlock(&sp->mutex); + subsys_put(sp); } device_remove_file(dev, &dev_attr_uevent); device_remove_attrs(dev); @@ -4231,7 +4270,7 @@ static void device_create_release(struct device *dev) } static __printf(6, 0) struct device * -device_create_groups_vargs(struct class *class, struct device *parent, +device_create_groups_vargs(const struct class *class, struct device 
*parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) @@ -4291,11 +4330,8 @@ error: * pointer. * * Returns &struct device pointer on success, or ERR_PTR() on error. - * - * Note: the struct class passed to this function must have previously - * been created with a call to class_create(). */ -struct device *device_create(struct class *class, struct device *parent, +struct device *device_create(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) { va_list vargs; @@ -4332,11 +4368,8 @@ EXPORT_SYMBOL_GPL(device_create); * pointer. * * Returns &struct device pointer on success, or ERR_PTR() on error. - * - * Note: the struct class passed to this function must have previously - * been created with a call to class_create(). */ -struct device *device_create_with_groups(struct class *class, +struct device *device_create_with_groups(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, @@ -4361,7 +4394,7 @@ EXPORT_SYMBOL_GPL(device_create_with_groups); * This call unregisters and cleans up a device that was created with a * call to device_create(). */ -void device_destroy(struct class *class, dev_t devt) +void device_destroy(const struct class *class, dev_t devt) { struct device *dev; @@ -4383,9 +4416,12 @@ EXPORT_SYMBOL_GPL(device_destroy); * on the same device to ensure that new_name is valid and * won't conflict with other devices. * - * Note: Don't call this function. Currently, the networking layer calls this - * function, but that will change. The following text from Kay Sievers offers - * some insight: + * Note: given that some subsystems (networking and infiniband) use this + * function, with no immediate plans for this to change, we cannot assume or + * require that this function not be called at all. + * + * However, if you're writing new code, do not call this function. The following + * text from Kay Sievers offers some insight: * * Renaming devices is racy at many levels, symlinks and other stuff are not * replaced atomically, and you get a "move" uevent, but it's not easy to @@ -4399,13 +4435,6 @@ EXPORT_SYMBOL_GPL(device_destroy); * kernel device renaming. Besides that, it's not even implemented now for * other things than (driver-core wise very simple) network devices. * - * We are currently about to change network renaming in udev to completely - * disallow renaming of devices in the same namespace as the kernel uses, - * because we can't solve the problems properly, that arise with swapping names - * of multiple interfaces without races. Means, renaming of eth[0-9]* will only - * be allowed to some other name than eth[0-9]*, for the aforementioned - * reasons. - * * Make up a "real" name in the driver before you register anything, or add * some other attributes for userspace to find the device, or use udev to add * symlinks -- but never rename kernel devices later, it's a complete mess. 
We @@ -4431,9 +4460,16 @@ int device_rename(struct device *dev, const char *new_name) } if (dev->class) { - error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, - kobj, old_device_name, + struct subsys_private *sp = class_to_subsys(dev->class); + + if (!sp) { + error = -EINVAL; + goto out; + } + + error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, new_name, kobject_namespace(kobj)); + subsys_put(sp); if (error) goto out; } @@ -4558,7 +4594,7 @@ static int device_attrs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { struct kobject *kobj = &dev->kobj; - struct class *class = dev->class; + const struct class *class = dev->class; const struct device_type *type = dev->type; int error; @@ -4616,6 +4652,7 @@ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { int error; struct kobject *kobj = &dev->kobj; + struct subsys_private *sp; dev = get_device(dev); if (!dev) @@ -4652,21 +4689,19 @@ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) if (error) goto out; -#ifdef CONFIG_BLOCK - if (sysfs_deprecated && dev->class == &block_class) - goto out; -#endif - /* * Change the owner of the symlink located in the class directory of * the device class associated with @dev which points to the actual * directory entry for @dev to @kuid/@kgid. This ensures that the * symlink shows the same permissions as its target. */ - error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj, - dev_name(dev), kuid, kgid); - if (error) + sp = class_to_subsys(dev->class); + if (!sp) { + error = -EINVAL; goto out; + } + error = sysfs_link_change_owner(&sp->subsys.kobj, &dev->kobj, dev_name(dev), kuid, kgid); + subsys_put(sp); out: put_device(dev); @@ -4965,9 +5000,13 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } else { if (fwnode_is_primary(fn)) { dev->fwnode = fn->secondary; + + /* Skip nullifying fn->secondary if the primary is shared */ + if (parent && fn == parent->fwnode) + return; + /* Set fn->secondary = NULL, so fn remains the primary fwnode */ - if (!(parent && fn == parent->fwnode)) - fn->secondary = NULL; + fn->secondary = NULL; } else { dev->fwnode = NULL; } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 182c6122f815..c1815b9dae68 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -487,7 +487,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = { bool cpu_is_hotpluggable(unsigned int cpu) { struct device *dev = get_cpu_device(cpu); - return dev && container_of(dev, struct cpu, dev)->hotpluggable; + return dev && container_of(dev, struct cpu, dev)->hotpluggable + && tick_nohz_cpu_hotpluggable(cpu); } EXPORT_SYMBOL_GPL(cpu_is_hotpluggable); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 8def2ba08a82..9c09ca5c4ab6 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -315,6 +315,8 @@ static void deferred_probe_timeout_work_func(struct work_struct *work) list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) dev_info(p->device, "deferred probe pending\n"); mutex_unlock(&deferred_probe_mutex); + + fw_devlink_probing_done(); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); @@ -364,6 +366,10 @@ static int deferred_probe_initcall(void) schedule_delayed_work(&deferred_probe_timeout_work, driver_deferred_probe_timeout * HZ); } + + if (!IS_ENABLED(CONFIG_MODULES)) + fw_devlink_probing_done(); + return 0; } late_initcall(deferred_probe_initcall); @@ -504,6 +510,27 @@ 
EXPORT_SYMBOL_GPL(device_bind_driver); static atomic_t probe_count = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); +static ssize_t state_synced_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret = 0; + + if (strcmp("1", buf)) + return -EINVAL; + + device_lock(dev); + if (!dev->state_synced) { + dev->state_synced = true; + dev_sync_state(dev); + } else { + ret = -EINVAL; + } + device_unlock(dev); + + return ret ? ret : count; +} + static ssize_t state_synced_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -515,7 +542,7 @@ static ssize_t state_synced_show(struct device *dev, return sysfs_emit(buf, "%u\n", val); } -static DEVICE_ATTR_RO(state_synced); +static DEVICE_ATTR_RW(state_synced); static void device_unbind_cleanup(struct device *dev) { @@ -708,7 +735,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv) calltime = ktime_get(); ret = really_probe(dev, drv); rettime = ktime_get(); - pr_debug("probe of %s returned %d after %lld usecs\n", + /* + * Don't change this to pr_debug() because that requires + * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the + * kernel commandline to print this all the time at the debug level. + */ + printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n", dev_name(dev), ret, ktime_us_delta(rettime, calltime)); return ret; } diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index 1c06781f7114..91536ee05f14 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -167,7 +167,7 @@ static int devcd_free(struct device *dev, void *data) return 0; } -static ssize_t disabled_show(struct class *class, struct class_attribute *attr, +static ssize_t disabled_show(const struct class *class, const struct class_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", devcd_disabled); @@ -197,7 +197,7 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr, * so, above situation would not occur. */ -static ssize_t disabled_store(struct class *class, struct class_attribute *attr, +static ssize_t disabled_store(const struct class *class, const struct class_attribute *attr, const char *buf, size_t count) { long tmp = simple_strtol(buf, NULL, 10); @@ -226,7 +226,6 @@ ATTRIBUTE_GROUPS(devcd_class); static struct class devcd_class = { .name = "devcoredump", - .owner = THIS_MODULE, .dev_release = devcd_dev_release, .dev_groups = devcd_dev_groups, .class_groups = devcd_class_groups, diff --git a/drivers/base/devres.c b/drivers/base/devres.c index c0e100074aa3..5c998cfac335 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -722,20 +722,21 @@ static void devm_action_release(struct device *dev, void *res) } /** - * devm_add_action() - add a custom action to list of managed resources + * __devm_add_action() - add a custom action to list of managed resources * @dev: Device that owns the action * @action: Function that should be called * @data: Pointer to data passed to @action implementation + * @name: Name of the resource (for debugging purposes) * * This adds a custom action to the list of managed resources so that * it gets executed as part of standard resource unwinding. 
*/ -int devm_add_action(struct device *dev, void (*action)(void *), void *data) +int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name) { struct action_devres *devres; - devres = devres_alloc(devm_action_release, - sizeof(struct action_devres), GFP_KERNEL); + devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres), + GFP_KERNEL, NUMA_NO_NODE, name); if (!devres) return -ENOMEM; @@ -745,7 +746,7 @@ int devm_add_action(struct device *dev, void (*action)(void *), void *data) devres_add(dev, devres); return 0; } -EXPORT_SYMBOL_GPL(devm_add_action); +EXPORT_SYMBOL_GPL(__devm_add_action); /** * devm_remove_action() - removes previously added custom action diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index ae72d4ba8547..b848764ef018 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -94,15 +94,6 @@ static struct file_system_type dev_fs_type = { .mount = public_dev_mount, }; -#ifdef CONFIG_BLOCK -static inline int is_blockdev(struct device *dev) -{ - return dev->class == &block_class; -} -#else -static inline int is_blockdev(struct device *dev) { return 0; } -#endif - static int devtmpfs_submit_req(struct req *req, const char *tmp) { init_completion(&req->done); diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig index 5166b323a0f8..5ca00e02fe82 100644 --- a/drivers/base/firmware_loader/Kconfig +++ b/drivers/base/firmware_loader/Kconfig @@ -3,6 +3,8 @@ menu "Firmware loader" config FW_LOADER tristate "Firmware loading facility" if EXPERT + select CRYPTO_HASH if FW_LOADER_DEBUG + select CRYPTO_SHA256 if FW_LOADER_DEBUG default y help This enables the firmware loading facility in the kernel. The kernel @@ -24,6 +26,17 @@ config FW_LOADER You also want to be sure to enable this built-in if you are going to enable built-in firmware (CONFIG_EXTRA_FIRMWARE). +config FW_LOADER_DEBUG + bool "Log filenames and checksums for loaded firmware" + depends on CRYPTO = FW_LOADER || CRYPTO=y + depends on DYNAMIC_DEBUG + depends on FW_LOADER + default FW_LOADER + help + Select this option to use dynamic debug to log firmware filenames and + SHA256 checksums to the kernel log for each firmware file that is + loaded. 
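The __devm_add_action() change in drivers/base/devres.c above stays transparent to callers: the matching include/linux/device.h hunk (outside this diffstat) is expected to keep devm_add_action() as a macro that forwards #action as the new @name argument. A minimal usage sketch under that assumption, with a hypothetical example_clk_off() teardown helper:

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical helper; the devres entry is then named "example_clk_off". */
static void example_clk_off(void *data)
{
	clk_disable_unprepare(data);
}

static int example_probe(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* Undone automatically on driver detach, or immediately on failure. */
	return devm_add_action_or_reset(dev, example_clk_off, clk);
}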
+ if FW_LOADER config FW_LOADER_PAGED_BUF diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 017c4cdb219e..9d79d5ad9102 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -493,9 +493,9 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, const void *in_buffer)) { size_t size; - int i, len; + int i, len, maxlen = 0; int rc = -ENOENT; - char *path; + char *path, *nt = NULL; size_t msize = INT_MAX; void *buffer = NULL; @@ -518,8 +518,17 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, if (!fw_path[i][0]) continue; - len = snprintf(path, PATH_MAX, "%s/%s%s", - fw_path[i], fw_priv->fw_name, suffix); + /* strip off \n from customized path */ + maxlen = strlen(fw_path[i]); + if (i == 0) { + nt = strchr(fw_path[i], '\n'); + if (nt) + maxlen = nt - fw_path[i]; + } + + len = snprintf(path, PATH_MAX, "%.*s/%s%s", + maxlen, fw_path[i], + fw_priv->fw_name, suffix); if (len >= PATH_MAX) { rc = -ENAMETOOLONG; break; } @@ -791,6 +800,50 @@ static void fw_abort_batch_reqs(struct firmware *fw) mutex_unlock(&fw_lock); } +#if defined(CONFIG_FW_LOADER_DEBUG) +#include <crypto/hash.h> +#include <crypto/sha2.h> + +static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device) +{ + struct shash_desc *shash; + struct crypto_shash *alg; + u8 *sha256buf; + char *outbuf; + + alg = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(alg)) + return; + + sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); + outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL); + shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL); + if (!sha256buf || !outbuf || !shash) + goto out_free; + + shash->tfm = alg; + + if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0) + goto out_shash; + + for (int i = 0; i < SHA256_DIGEST_SIZE; i++) + sprintf(&outbuf[i * 2], "%02x", sha256buf[i]); + outbuf[SHA256_BLOCK_SIZE] = 0; + dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf); + +out_shash: + crypto_free_shash(alg); +out_free: + kfree(shash); + kfree(outbuf); + kfree(sha256buf); +} +#else +static void fw_log_firmware_info(const struct firmware *fw, const char *name, + struct device *device) +{} +#endif + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -861,11 +914,13 @@ _request_firmware(const struct firmware **firmware_p, const char *name, revert_creds(old_cred); put_cred(kern_cred); - out: +out: if (ret < 0) { fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; + } else { + fw_log_firmware_info(fw, name, device); } *firmware_p = fw; diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c index 56911d75b90a..c9c93b47d9a5 100644 --- a/drivers/base/firmware_loader/sysfs.c +++ b/drivers/base/firmware_loader/sysfs.c @@ -25,7 +25,7 @@ void __fw_load_abort(struct fw_priv *fw_priv) } #ifdef CONFIG_FW_LOADER_USER_HELPER -static ssize_t timeout_show(struct class *class, struct class_attribute *attr, +static ssize_t timeout_show(const struct class *class, const struct class_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", __firmware_loading_timeout()); } @@ -44,7 +44,7 @@ static ssize_t timeout_show(struct class *class, struct class_attribute *attr, * * Note: zero means 'wait forever'.
**/ -static ssize_t timeout_store(struct class *class, struct class_attribute *attr, +static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr, const char *buf, size_t count) { int tmp_loading_timeout = simple_strtol(buf, NULL, 10); diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h index 82cde9f1b161..3f3f61307998 100644 --- a/drivers/base/physical_location.h +++ b/drivers/base/physical_location.h @@ -8,7 +8,7 @@ #include <linux/device.h> #ifdef CONFIG_ACPI -extern bool dev_add_physical_location(struct device *dev); +bool dev_add_physical_location(struct device *dev); extern const struct attribute_group dev_attr_physical_location_group; #else static inline bool dev_add_physical_location(struct device *dev) { return false; }; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c50139207794..f85f3515c258 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -679,7 +679,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func) static void async_resume_noirq(void *data, async_cookie_t cookie) { - struct device *dev = (struct device *)data; + struct device *dev = data; int error; error = device_resume_noirq(dev, pm_transition, true); @@ -816,7 +816,7 @@ Out: static void async_resume_early(void *data, async_cookie_t cookie) { - struct device *dev = (struct device *)data; + struct device *dev = data; int error; error = device_resume_early(dev, pm_transition, true); @@ -980,7 +980,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) static void async_resume(void *data, async_cookie_t cookie) { - struct device *dev = (struct device *)data; + struct device *dev = data; int error; error = device_resume(dev, pm_transition, true); @@ -1269,7 +1269,7 @@ Complete: static void async_suspend_noirq(void *data, async_cookie_t cookie) { - struct device *dev = (struct device *)data; + struct device *dev = data; int error; error = __device_suspend_noirq(dev, pm_transition, true); @@ -1450,7 +1450,7 @@ Complete: static void async_suspend_late(void *data, async_cookie_t cookie) { - struct device *dev = (struct device *)data; + struct device *dev = data; int error; error = __device_suspend_late(dev, pm_transition, true); @@ -1727,7 +1727,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) static void async_suspend(void *data, async_cookie_t cookie) { - struct device *dev = (struct device *)data; + struct device *dev = data; int error; error = __device_suspend(dev, pm_transition, true); diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c index 924fac493c4f..6732ed2869f9 100644 --- a/drivers/base/power/wakeup_stats.c +++ b/drivers/base/power/wakeup_stats.c @@ -210,7 +210,7 @@ void wakeup_source_sysfs_remove(struct wakeup_source *ws) static int __init wakeup_sources_sysfs_init(void) { - wakeup_class = class_create(THIS_MODULE, "wakeup"); + wakeup_class = class_create("wakeup"); return PTR_ERR_OR_ZERO(wakeup_class); } diff --git a/drivers/base/property.c b/drivers/base/property.c index 083a95791d3b..f6117ec9805c 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -37,8 +37,10 @@ EXPORT_SYMBOL_GPL(__dev_fwnode_const); * @propname: Name of the property * * Check if property @propname is present in the device firmware description. + * + * Return: true if property @propname is present. Otherwise, returns false. 
*/ -bool device_property_present(struct device *dev, const char *propname) +bool device_property_present(const struct device *dev, const char *propname) { return fwnode_property_present(dev_fwnode(dev), propname); } @@ -48,6 +50,8 @@ EXPORT_SYMBOL_GPL(device_property_present); * fwnode_property_present - check if a property of a firmware node is present * @fwnode: Firmware node whose property to check * @propname: Name of the property + * + * Return: true if property @propname is present. Otherwise, returns false. */ bool fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname) @@ -86,7 +90,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_present); * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ -int device_property_read_u8_array(struct device *dev, const char *propname, +int device_property_read_u8_array(const struct device *dev, const char *propname, u8 *val, size_t nval) { return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval); @@ -114,7 +118,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array); * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ -int device_property_read_u16_array(struct device *dev, const char *propname, +int device_property_read_u16_array(const struct device *dev, const char *propname, u16 *val, size_t nval) { return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval); @@ -142,7 +146,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array); * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ -int device_property_read_u32_array(struct device *dev, const char *propname, +int device_property_read_u32_array(const struct device *dev, const char *propname, u32 *val, size_t nval) { return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval); @@ -170,7 +174,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array); * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ -int device_property_read_u64_array(struct device *dev, const char *propname, +int device_property_read_u64_array(const struct device *dev, const char *propname, u64 *val, size_t nval) { return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval); @@ -198,7 +202,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array); * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ -int device_property_read_string_array(struct device *dev, const char *propname, +int device_property_read_string_array(const struct device *dev, const char *propname, const char **val, size_t nval) { return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval); @@ -220,7 +224,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string_array); * %-EPROTO or %-EILSEQ if the property type is not a string. * %-ENXIO if no suitable firmware interface is present. */ -int device_property_read_string(struct device *dev, const char *propname, +int device_property_read_string(const struct device *dev, const char *propname, const char **val) { return fwnode_property_read_string(dev_fwnode(dev), propname, val); @@ -242,7 +246,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string); * %-EPROTO if the property is not an array of strings, * %-ENXIO if no suitable firmware interface is present. 
*/ -int device_property_match_string(struct device *dev, const char *propname, +int device_property_match_string(const struct device *dev, const char *propname, const char *string) { return fwnode_property_match_string(dev_fwnode(dev), propname, string); } @@ -508,10 +512,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string); * Obtain a reference based on a named property in an fwnode, with * integer arguments. * - * Caller is responsible to call fwnode_handle_put() on the returned - * args->fwnode pointer. + * The caller is responsible for calling fwnode_handle_put() on the returned + * @args->fwnode pointer. * - * Returns: %0 on success + * Return: %0 on success * %-ENOENT when the index is out of bounds, the index has an empty * reference or the property was not found * %-EINVAL on parse error @@ -547,8 +551,11 @@ EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); * * @index can be used when the named reference holds a table of references. * - * Returns pointer to the reference fwnode, or ERR_PTR. Caller is responsible to - * call fwnode_handle_put() on the returned fwnode pointer. + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. + * + * Return: a pointer to the reference fwnode, when found. Otherwise, + * returns an error pointer. */ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode, const char *name, @@ -567,7 +574,7 @@ EXPORT_SYMBOL_GPL(fwnode_find_reference); * fwnode_get_name - Return the name of a node * @fwnode: The firmware node * - * Returns a pointer to the node name. + * Return: a pointer to the node name, or %NULL. */ const char *fwnode_get_name(const struct fwnode_handle *fwnode) { @@ -579,7 +586,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_name); * fwnode_get_name_prefix - Return the prefix of node for printing purposes * @fwnode: The firmware node * - * Returns the prefix of a node, intended to be printed right before the node. + * Return: the prefix of a node, intended to be printed right before the node. * The prefix works also as a separator between the nodes. */ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode) @@ -591,7 +598,10 @@ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode) * fwnode_get_parent - Return parent firmware node * @fwnode: Firmware whose parent is retrieved * - * Return parent firmware node of the given node if possible or %NULL if no + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. + * + * Return: parent firmware node of the given node if possible or %NULL if no * parent was available. */ struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode) { @@ -608,8 +618,12 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent); * on the passed node, making it suitable for iterating through a * node's parents. * - * Returns a node pointer with refcount incremented, use - * fwnode_handle_put() on it when done. + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. Note that this function also puts a reference to @fwnode + * unconditionally. + * + * Return: parent firmware node of the given node if possible or %NULL if no + * parent was available. */ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode) { @@ -629,10 +643,12 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent); * firmware node that has a corresponding struct device and returns that struct * device.
* - * The caller of this function is expected to call put_device() on the returned - * device when they are done. + * The caller is responsible for calling put_device() on the returned device + * pointer. + * + * Return: a pointer to the device of the @fwnode's closest ancestor. */ -struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode) +struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode) { struct fwnode_handle *parent; struct device *dev; @@ -651,7 +667,7 @@ struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode) * fwnode_count_parents - Return the number of parents a node has * @fwnode: The node the parents of which are to be counted * - * Returns the number of parents a node has. + * Return: the number of parents a node has. */ unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode) { @@ -670,12 +686,12 @@ EXPORT_SYMBOL_GPL(fwnode_count_parents); * @fwnode: The node the parent of which is requested * @depth: Distance of the parent from the node * - * Returns the nth parent of a node. If there is no parent at the requested + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. + * + * Return: the nth parent of a node. If there is no parent at the requested * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on. - * - * The caller is responsible for calling fwnode_handle_put() for the returned - * node. */ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode, unsigned int depth) @@ -700,9 +716,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); * * A node is considered an ancestor of itself too. * - * Returns true if @ancestor is an ancestor of @child. Otherwise, returns false. + * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false. */ -bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle *child) +bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child) { struct fwnode_handle *parent; @@ -725,6 +741,10 @@ bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle * fwnode_get_next_child_node - Return the next child node handle for a node * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. Note that this function also puts a reference to @child + * unconditionally. */ struct fwnode_handle * fwnode_get_next_child_node(const struct fwnode_handle *fwnode, @@ -735,10 +755,13 @@ fwnode_get_next_child_node(const struct fwnode_handle *fwnode, EXPORT_SYMBOL_GPL(fwnode_get_next_child_node); /** - * fwnode_get_next_available_child_node - Return the next - * available child node handle for a node + * fwnode_get_next_available_child_node - Return the next available child node handle for a node * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. Note that this function also puts a reference to @child + * unconditionally. 
*/ struct fwnode_handle * fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode, @@ -762,7 +785,11 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node); /** * device_get_next_child_node - Return the next child node handle for a device * @dev: Device to find the next child node for. - * @child: Handle to one of the device's child nodes or a null handle. + * @child: Handle to one of the device's child nodes or a %NULL handle. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. Note that this function also puts a reference to @child + * unconditionally. */ struct fwnode_handle *device_get_next_child_node(const struct device *dev, struct fwnode_handle *child) @@ -787,6 +814,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node); * fwnode_get_named_child_node - Return first matching named child node handle * @fwnode: Firmware node to find the named child node for. * @childname: String to match child node name against. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. */ struct fwnode_handle * fwnode_get_named_child_node(const struct fwnode_handle *fwnode, @@ -800,6 +830,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_named_child_node); * device_get_named_child_node - Return first matching named child node handle * @dev: Device to find the named child node for. * @childname: String to match child node name against. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. */ struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname) @@ -812,7 +845,10 @@ EXPORT_SYMBOL_GPL(device_get_named_child_node); * fwnode_handle_get - Obtain a reference to a device node * @fwnode: Pointer to the device node to obtain the reference to. * - * Returns the fwnode handle. + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. + * + * Return: the fwnode handle. */ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode) { @@ -841,6 +877,8 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put); * fwnode_device_is_available - check if a device is available for use * @fwnode: Pointer to the fwnode of the device. * + * Return: true if device is available for use. Otherwise, returns false. + * * For fwnode node types that don't implement the .device_is_available() * operation, this function returns true. */ @@ -859,6 +897,8 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available); /** * device_get_child_node_count - return the number of child nodes for device * @dev: Device to count the child nodes for + * + * Return: the number of child nodes for a given device. */ unsigned int device_get_child_node_count(const struct device *dev) { @@ -895,7 +935,7 @@ EXPORT_SYMBOL_GPL(device_get_dma_attr); * 'phy-connection-type', and return its index in phy_modes table, or errno in * error case. */ -int fwnode_get_phy_mode(struct fwnode_handle *fwnode) +int fwnode_get_phy_mode(const struct fwnode_handle *fwnode) { const char *pm; int err, i; @@ -934,7 +974,7 @@ EXPORT_SYMBOL_GPL(device_get_phy_mode); * @fwnode: Pointer to the firmware node * @index: Index of the IO range * - * Returns a pointer to the mapped memory. + * Return: a pointer to the mapped memory. */ void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index) { @@ -947,8 +987,8 @@ EXPORT_SYMBOL(fwnode_iomap); * @fwnode: Pointer to the firmware node * @index: Zero-based index of the IRQ * - * Returns Linux IRQ number on success.
Other values are determined - accordingly to acpi_/of_ irq_get() operation. + * Return: Linux IRQ number on success. Other values are determined + * according to acpi_irq_get() or of_irq_get() operation. */ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index) { @@ -967,8 +1007,7 @@ EXPORT_SYMBOL(fwnode_irq_get); * number of the IRQ resource corresponding to the index of the matched * string. * - * Return: - * Linux IRQ number on success, or negative errno otherwise. + * Return: Linux IRQ number on success, or negative errno otherwise. */ int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name) { @@ -990,7 +1029,11 @@ EXPORT_SYMBOL(fwnode_irq_get_byname); * @fwnode: Pointer to the parent firmware node * @prev: Previous endpoint node or %NULL to get the first * - * Returns an endpoint firmware node pointer or %NULL if no more endpoints + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. Note that this function also puts a reference to @prev + * unconditionally. + * + * Return: an endpoint firmware node pointer or %NULL if no more endpoints * are available. */ struct fwnode_handle * @@ -1030,6 +1073,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); * fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint * @endpoint: Endpoint firmware node of the port * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. + * * Return: the firmware node of the device the @endpoint belongs to. */ struct fwnode_handle * @@ -1051,6 +1097,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent); * @fwnode: Endpoint firmware node pointing to the remote endpoint * * Extracts firmware node of a remote device the @fwnode points to. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. */ struct fwnode_handle * fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode) @@ -1071,6 +1120,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent); * @fwnode: Endpoint firmware node pointing to the remote endpoint * * Extracts firmware node of a remote port the @fwnode points to. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. */ struct fwnode_handle * fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode) @@ -1084,6 +1136,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port); * @fwnode: Endpoint firmware node pointing to the remote endpoint * * Extracts firmware node of a remote endpoint the @fwnode points to. + * + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. */ struct fwnode_handle * fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) @@ -1111,8 +1166,11 @@ static bool fwnode_graph_remote_available(struct fwnode_handle *ep) * @endpoint: identifier of the endpoint node under the port node * @flags: fwnode lookup flags * - * Return the fwnode handle of the local endpoint corresponding the port and - * endpoint IDs or NULL if not found. + * The caller is responsible for calling fwnode_handle_put() on the returned + * fwnode pointer. + * + * Return: the fwnode handle of the local endpoint corresponding to the port and + * endpoint IDs or %NULL if not found.
* * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint * has not been found, look for the closest endpoint ID greater than the @@ -1120,9 +1178,6 @@ static bool fwnode_graph_remote_available(struct fwnode_handle *ep) * * Does not return endpoints that belong to disabled devices or endpoints that * are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags. - * - * The returned endpoint needs to be released by calling fwnode_handle_put() on - * it when it is not needed any more. */ struct fwnode_handle * fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, @@ -1180,7 +1235,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id); * If FWNODE_GRAPH_DEVICE_DISABLED flag is specified, also unconnected endpoints * and endpoints connected to disabled devices are counted. */ -unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode, +unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode, unsigned long flags) { struct fwnode_handle *ep; @@ -1328,7 +1383,8 @@ EXPORT_SYMBOL_GPL(fwnode_connection_find_match); * @fwnode and other device nodes. @match will be used to convert the * connection description to data the caller is expecting to be returned * through the @matches array. - * If @matches is NULL @matches_len is ignored and the total number of resolved + * + * If @matches is %NULL @matches_len is ignored and the total number of resolved * matches is returned. * * Return: Number of matches resolved, or negative errno. diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index cd4bb642b9de..33a8366e22a5 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -9,10 +9,12 @@ config REGMAP select MDIO_BUS if REGMAP_MDIO bool -config REGCACHE_COMPRESSED - select LZO_COMPRESS - select LZO_DECOMPRESS - bool +config REGMAP_KUNIT + tristate "KUnit tests for regmap" + depends on KUNIT + default KUNIT_ALL_TESTS + select REGMAP + select REGMAP_RAM config REGMAP_AC97 tristate @@ -46,6 +48,9 @@ config REGMAP_MMIO config REGMAP_IRQ bool +config REGMAP_RAM + tristate + config REGMAP_SOUNDWIRE tristate depends on SOUNDWIRE diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index 6990de7ca9a9..f6c6cb017200 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -3,11 +3,12 @@ CFLAGS_regmap.o := -I$(src) obj-$(CONFIG_REGMAP) += regmap.o regcache.o -obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o -obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o +obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o regcache-maple.o obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o +obj-$(CONFIG_REGMAP_KUNIT) += regmap-kunit.o obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o +obj-$(CONFIG_REGMAP_RAM) += regmap-ram.o obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index da8996e7a1f1..9bd0dfd1e259 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -31,8 +31,8 @@ struct regmap_format { size_t buf_size; size_t reg_bytes; size_t pad_bytes; - size_t reg_downshift; size_t val_bytes; + s8 reg_shift; void (*format_write)(struct regmap *map, unsigned int reg, unsigned int val); void (*format_reg)(void *buf, unsigned int reg, unsigned int shift); @@ -270,6 +270,7 @@ unsigned int regcache_get_val(struct regmap *map, const void 
*base, bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, unsigned int val); int regcache_lookup_reg(struct regmap *map, unsigned int reg); +int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val); int _regmap_raw_write(struct regmap *map, unsigned int reg, const void *val, size_t val_len, bool noinc); @@ -281,7 +282,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev, const struct regmap_config *config); extern struct regcache_ops regcache_rbtree_ops; -extern struct regcache_ops regcache_lzo_ops; +extern struct regcache_ops regcache_maple_ops; extern struct regcache_ops regcache_flat_ops; static inline const char *regmap_name(const struct regmap *map) @@ -307,4 +308,23 @@ static inline unsigned int regcache_get_index_by_order(const struct regmap *map, return reg >> map->reg_stride_order; } +struct regmap_ram_data { + unsigned int *vals; /* Allocated by caller */ + bool *read; + bool *written; +}; + +/* + * Create a test register map with data stored in RAM, not intended + * for practical use. + */ +struct regmap *__regmap_init_ram(const struct regmap_config *config, + struct regmap_ram_data *data, + struct lock_class_key *lock_key, + const char *lock_name); + +#define regmap_init_ram(config, data) \ + __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data) + + #endif diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c deleted file mode 100644 index 7886303eb026..000000000000 --- a/drivers/base/regmap/regcache-lzo.c +++ /dev/null @@ -1,368 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// -// Register cache access API - LZO caching support -// -// Copyright 2011 Wolfson Microelectronics plc -// -// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> - -#include <linux/device.h> -#include <linux/lzo.h> -#include <linux/slab.h> - -#include "internal.h" - -static int regcache_lzo_exit(struct regmap *map); - -struct regcache_lzo_ctx { - void *wmem; - void *dst; - const void *src; - size_t src_len; - size_t dst_len; - size_t decompressed_size; - unsigned long *sync_bmp; - int sync_bmp_nbits; -}; - -#define LZO_BLOCK_NUM 8 -static int regcache_lzo_block_count(struct regmap *map) -{ - return LZO_BLOCK_NUM; -} - -static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx) -{ - lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); - if (!lzo_ctx->wmem) - return -ENOMEM; - return 0; -} - -static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx) -{ - size_t compress_size; - int ret; - - ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len, - lzo_ctx->dst, &compress_size, lzo_ctx->wmem); - if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len) - return -EINVAL; - lzo_ctx->dst_len = compress_size; - return 0; -} - -static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx) -{ - size_t dst_len; - int ret; - - dst_len = lzo_ctx->dst_len; - ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len, - lzo_ctx->dst, &dst_len); - if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len) - return -EINVAL; - return 0; -} - -static int regcache_lzo_compress_cache_block(struct regmap *map, - struct regcache_lzo_ctx *lzo_ctx) -{ - int ret; - - lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE); - lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); - if (!lzo_ctx->dst) { - lzo_ctx->dst_len = 0; - return -ENOMEM; - } - - ret = regcache_lzo_compress(lzo_ctx); - if (ret < 0) - return ret; - return 0; -} - -static int regcache_lzo_decompress_cache_block(struct regmap *map, - struct
regcache_lzo_ctx *lzo_ctx) -{ - int ret; - - lzo_ctx->dst_len = lzo_ctx->decompressed_size; - lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); - if (!lzo_ctx->dst) { - lzo_ctx->dst_len = 0; - return -ENOMEM; - } - - ret = regcache_lzo_decompress(lzo_ctx); - if (ret < 0) - return ret; - return 0; -} - -static inline int regcache_lzo_get_blkindex(struct regmap *map, - unsigned int reg) -{ - return ((reg / map->reg_stride) * map->cache_word_size) / - DIV_ROUND_UP(map->cache_size_raw, - regcache_lzo_block_count(map)); -} - -static inline int regcache_lzo_get_blkpos(struct regmap *map, - unsigned int reg) -{ - return (reg / map->reg_stride) % - (DIV_ROUND_UP(map->cache_size_raw, - regcache_lzo_block_count(map)) / - map->cache_word_size); -} - -static inline int regcache_lzo_get_blksize(struct regmap *map) -{ - return DIV_ROUND_UP(map->cache_size_raw, - regcache_lzo_block_count(map)); -} - -static int regcache_lzo_init(struct regmap *map) -{ - struct regcache_lzo_ctx **lzo_blocks; - size_t bmp_size; - int ret, i, blksize, blkcount; - const char *p, *end; - unsigned long *sync_bmp; - - ret = 0; - - blkcount = regcache_lzo_block_count(map); - map->cache = kcalloc(blkcount, sizeof(*lzo_blocks), - GFP_KERNEL); - if (!map->cache) - return -ENOMEM; - lzo_blocks = map->cache; - - /* - * allocate a bitmap to be used when syncing the cache with - * the hardware. Each time a register is modified, the corresponding - * bit is set in the bitmap, so we know that we have to sync - * that register. - */ - bmp_size = map->num_reg_defaults_raw; - sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL); - if (!sync_bmp) { - ret = -ENOMEM; - goto err; - } - - /* allocate the lzo blocks and initialize them */ - for (i = 0; i < blkcount; i++) { - lzo_blocks[i] = kzalloc(sizeof **lzo_blocks, - GFP_KERNEL); - if (!lzo_blocks[i]) { - bitmap_free(sync_bmp); - ret = -ENOMEM; - goto err; - } - lzo_blocks[i]->sync_bmp = sync_bmp; - lzo_blocks[i]->sync_bmp_nbits = bmp_size; - /* alloc the working space for the compressed block */ - ret = regcache_lzo_prepare(lzo_blocks[i]); - if (ret < 0) - goto err; - } - - blksize = regcache_lzo_get_blksize(map); - p = map->reg_defaults_raw; - end = map->reg_defaults_raw + map->cache_size_raw; - /* compress the register map and fill the lzo blocks */ - for (i = 0; i < blkcount; i++, p += blksize) { - lzo_blocks[i]->src = p; - if (p + blksize > end) - lzo_blocks[i]->src_len = end - p; - else - lzo_blocks[i]->src_len = blksize; - ret = regcache_lzo_compress_cache_block(map, - lzo_blocks[i]); - if (ret < 0) - goto err; - lzo_blocks[i]->decompressed_size = - lzo_blocks[i]->src_len; - } - - return 0; -err: - regcache_lzo_exit(map); - return ret; -} - -static int regcache_lzo_exit(struct regmap *map) -{ - struct regcache_lzo_ctx **lzo_blocks; - int i, blkcount; - - lzo_blocks = map->cache; - if (!lzo_blocks) - return 0; - - blkcount = regcache_lzo_block_count(map); - /* - * the pointer to the bitmap used for syncing the cache - * is shared amongst all lzo_blocks. Ensure it is freed - * only once. 
- */ - if (lzo_blocks[0]) - bitmap_free(lzo_blocks[0]->sync_bmp); - for (i = 0; i < blkcount; i++) { - if (lzo_blocks[i]) { - kfree(lzo_blocks[i]->wmem); - kfree(lzo_blocks[i]->dst); - } - /* each lzo_block is a pointer returned by kmalloc or NULL */ - kfree(lzo_blocks[i]); - } - kfree(lzo_blocks); - map->cache = NULL; - return 0; -} - -static int regcache_lzo_read(struct regmap *map, - unsigned int reg, unsigned int *value) -{ - struct regcache_lzo_ctx *lzo_block, **lzo_blocks; - int ret, blkindex, blkpos; - size_t tmp_dst_len; - void *tmp_dst; - - /* index of the compressed lzo block */ - blkindex = regcache_lzo_get_blkindex(map, reg); - /* register index within the decompressed block */ - blkpos = regcache_lzo_get_blkpos(map, reg); - lzo_blocks = map->cache; - lzo_block = lzo_blocks[blkindex]; - - /* save the pointer and length of the compressed block */ - tmp_dst = lzo_block->dst; - tmp_dst_len = lzo_block->dst_len; - - /* prepare the source to be the compressed block */ - lzo_block->src = lzo_block->dst; - lzo_block->src_len = lzo_block->dst_len; - - /* decompress the block */ - ret = regcache_lzo_decompress_cache_block(map, lzo_block); - if (ret >= 0) - /* fetch the value from the cache */ - *value = regcache_get_val(map, lzo_block->dst, blkpos); - - kfree(lzo_block->dst); - /* restore the pointer and length of the compressed block */ - lzo_block->dst = tmp_dst; - lzo_block->dst_len = tmp_dst_len; - - return ret; -} - -static int regcache_lzo_write(struct regmap *map, - unsigned int reg, unsigned int value) -{ - struct regcache_lzo_ctx *lzo_block, **lzo_blocks; - int ret, blkindex, blkpos; - size_t tmp_dst_len; - void *tmp_dst; - - /* index of the compressed lzo block */ - blkindex = regcache_lzo_get_blkindex(map, reg); - /* register index within the decompressed block */ - blkpos = regcache_lzo_get_blkpos(map, reg); - lzo_blocks = map->cache; - lzo_block = lzo_blocks[blkindex]; - - /* save the pointer and length of the compressed block */ - tmp_dst = lzo_block->dst; - tmp_dst_len = lzo_block->dst_len; - - /* prepare the source to be the compressed block */ - lzo_block->src = lzo_block->dst; - lzo_block->src_len = lzo_block->dst_len; - - /* decompress the block */ - ret = regcache_lzo_decompress_cache_block(map, lzo_block); - if (ret < 0) { - kfree(lzo_block->dst); - goto out; - } - - /* write the new value to the cache */ - if (regcache_set_val(map, lzo_block->dst, blkpos, value)) { - kfree(lzo_block->dst); - goto out; - } - - /* prepare the source to be the decompressed block */ - lzo_block->src = lzo_block->dst; - lzo_block->src_len = lzo_block->dst_len; - - /* compress the block */ - ret = regcache_lzo_compress_cache_block(map, lzo_block); - if (ret < 0) { - kfree(lzo_block->dst); - kfree(lzo_block->src); - goto out; - } - - /* set the bit so we know we have to sync this register */ - set_bit(reg / map->reg_stride, lzo_block->sync_bmp); - kfree(tmp_dst); - kfree(lzo_block->src); - return 0; -out: - lzo_block->dst = tmp_dst; - lzo_block->dst_len = tmp_dst_len; - return ret; -} - -static int regcache_lzo_sync(struct regmap *map, unsigned int min, - unsigned int max) -{ - struct regcache_lzo_ctx **lzo_blocks; - unsigned int val; - int i; - int ret; - - lzo_blocks = map->cache; - i = min; - for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp, - lzo_blocks[0]->sync_bmp_nbits) { - if (i > max) - continue; - - ret = regcache_read(map, i, &val); - if (ret) - return ret; - - /* Is this the hardware default? If so skip. 
*/ - ret = regcache_lookup_reg(map, i); - if (ret > 0 && val == map->reg_defaults[ret].def) - continue; - - map->cache_bypass = true; - ret = _regmap_write(map, i, val); - map->cache_bypass = false; - if (ret) - return ret; - dev_dbg(map->dev, "Synced register %#x, value %#x\n", - i, val); - } - - return 0; -} - -struct regcache_ops regcache_lzo_ops = { - .type = REGCACHE_COMPRESSED, - .name = "lzo", - .init = regcache_lzo_init, - .exit = regcache_lzo_exit, - .read = regcache_lzo_read, - .write = regcache_lzo_write, - .sync = regcache_lzo_sync -}; diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c new file mode 100644 index 000000000000..9b1b559107ef --- /dev/null +++ b/drivers/base/regmap/regcache-maple.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register cache access API - maple tree based cache +// +// Copyright 2023 Arm, Ltd +// +// Author: Mark Brown <broonie@kernel.org> + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/maple_tree.h> +#include <linux/slab.h> + +#include "internal.h" + +static int regcache_maple_read(struct regmap *map, + unsigned int reg, unsigned int *value) +{ + struct maple_tree *mt = map->cache; + MA_STATE(mas, mt, reg, reg); + unsigned long *entry; + + rcu_read_lock(); + + entry = mas_walk(&mas); + if (!entry) { + rcu_read_unlock(); + return -ENOENT; + } + + *value = entry[reg - mas.index]; + + rcu_read_unlock(); + + return 0; +} + +static int regcache_maple_write(struct regmap *map, unsigned int reg, + unsigned int val) +{ + struct maple_tree *mt = map->cache; + MA_STATE(mas, mt, reg, reg); + unsigned long *entry, *upper, *lower; + unsigned long index, last; + size_t lower_sz, upper_sz; + int ret; + + rcu_read_lock(); + + entry = mas_walk(&mas); + if (entry) { + entry[reg - mas.index] = val; + rcu_read_unlock(); + return 0; + } + + /* Any adjacent entries to extend/merge? */ + mas_set_range(&mas, reg - 1, reg + 1); + index = reg; + last = reg; + + lower = mas_find(&mas, reg - 1); + if (lower) { + index = mas.index; + lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long); + } + + upper = mas_find(&mas, reg + 1); + if (upper) { + last = mas.last; + upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long); + } + + rcu_read_unlock(); + + entry = kmalloc((last - index + 1) * sizeof(unsigned long), + GFP_KERNEL); + if (!entry) + return -ENOMEM; + + if (lower) + memcpy(entry, lower, lower_sz); + entry[reg - index] = val; + if (upper) + memcpy(&entry[reg - index + 1], upper, upper_sz); + + /* + * This is safe because the regmap lock means the Maple lock + * is redundant, but we need to take it due to lockdep asserts + * in the maple tree code. + */ + mas_lock(&mas); + + mas_set_range(&mas, index, last); + ret = mas_store_gfp(&mas, entry, GFP_KERNEL); + + mas_unlock(&mas); + + if (ret == 0) { + kfree(lower); + kfree(upper); + } + + return ret; +} + +static int regcache_maple_drop(struct regmap *map, unsigned int min, + unsigned int max) +{ + struct maple_tree *mt = map->cache; + MA_STATE(mas, mt, min, max); + unsigned long *entry, *lower, *upper; + unsigned long lower_index, lower_last; + unsigned long upper_index, upper_last; + int ret = 0; + + lower = NULL; + upper = NULL; + + mas_lock(&mas); + + mas_for_each(&mas, entry, max) { + /* + * This is safe because the regmap lock means the + * Maple lock is redundant, but we need to take it due + * to lockdep asserts in the maple tree code. + */ + mas_unlock(&mas); + + /* Do we need to save any of this entry?
*/ + if (mas.index < min) { + lower_index = mas.index; + lower_last = min - 1; + + lower = kmemdup(entry, ((min - mas.index) * + sizeof(unsigned long)), + GFP_KERNEL); + if (!lower) { + ret = -ENOMEM; + goto out_unlocked; + } + } + + if (mas.last > max) { + upper_index = max + 1; + upper_last = mas.last; + + upper = kmemdup(&entry[max + 1], + ((mas.last - max) * + sizeof(unsigned long)), + GFP_KERNEL); + if (!upper) { + ret = -ENOMEM; + goto out_unlocked; + } + } + + kfree(entry); + mas_lock(&mas); + mas_erase(&mas); + + /* Insert new nodes with the saved data */ + if (lower) { + mas_set_range(&mas, lower_index, lower_last); + ret = mas_store_gfp(&mas, lower, GFP_KERNEL); + if (ret != 0) + goto out; + lower = NULL; + } + + if (upper) { + mas_set_range(&mas, upper_index, upper_last); + ret = mas_store_gfp(&mas, upper, GFP_KERNEL); + if (ret != 0) + goto out; + upper = NULL; + } + } + +out: + mas_unlock(&mas); +out_unlocked: + kfree(lower); + kfree(upper); + + return ret; +} + +static int regcache_maple_sync(struct regmap *map, unsigned int min, + unsigned int max) +{ + struct maple_tree *mt = map->cache; + unsigned long *entry; + MA_STATE(mas, mt, min, max); + unsigned long lmin = min; + unsigned long lmax = max; + unsigned int r; + int ret = 0; + + map->cache_bypass = true; + + rcu_read_lock(); + + mas_for_each(&mas, entry, max) { + for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) { + ret = regcache_sync_val(map, r, entry[r - mas.index]); + if (ret != 0) + goto out; + } + } + +out: + rcu_read_unlock(); + + map->cache_bypass = false; + + return ret; +} + +static int regcache_maple_exit(struct regmap *map) +{ + struct maple_tree *mt = map->cache; + MA_STATE(mas, mt, 0, UINT_MAX); + unsigned int *entry; + + /* if we've already been called then just return */ + if (!mt) + return 0; + + mas_lock(&mas); + mas_for_each(&mas, entry, UINT_MAX) + kfree(entry); + __mt_destroy(mt); + mas_unlock(&mas); + + kfree(mt); + map->cache = NULL; + + return 0; +} + +static int regcache_maple_init(struct regmap *map) +{ + struct maple_tree *mt; + int i; + int ret; + + mt = kmalloc(sizeof(*mt), GFP_KERNEL); + if (!mt) + return -ENOMEM; + map->cache = mt; + + mt_init(mt); + + for (i = 0; i < map->num_reg_defaults; i++) { + ret = regcache_maple_write(map, + map->reg_defaults[i].reg, + map->reg_defaults[i].def); + if (ret) + goto err; + } + + return 0; + +err: + regcache_maple_exit(map); + return ret; +} + +struct regcache_ops regcache_maple_ops = { + .type = REGCACHE_MAPLE, + .name = "maple", + .init = regcache_maple_init, + .exit = regcache_maple_exit, + .read = regcache_maple_read, + .write = regcache_maple_write, + .drop = regcache_maple_drop, + .sync = regcache_maple_sync, +}; diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 362e043e26d8..029564695dbb 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -17,9 +17,7 @@ static const struct regcache_ops *cache_types[] = { &regcache_rbtree_ops, -#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED) - &regcache_lzo_ops, -#endif + &regcache_maple_ops, &regcache_flat_ops, }; @@ -148,7 +146,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) break; if (i == ARRAY_SIZE(cache_types)) { - dev_err(map->dev, "Could not match compress type: %d\n", + dev_err(map->dev, "Could not match cache type: %d\n", map->cache_type); return -EINVAL; } @@ -242,7 +240,7 @@ int regcache_read(struct regmap *map, int ret; if (map->cache_type == REGCACHE_NONE) - return -ENOSYS; + return -EINVAL;
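With regcache_maple_ops now listed in cache_types[] above, a driver selects the maple tree cache purely through its regmap_config. A minimal sketch, assuming the matching REGCACHE_MAPLE addition to enum regcache_type in include/linux/regmap.h (not part of this diffstat) and illustrative register geometry:

static const struct regmap_config example_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
	/* Sparse, dynamically allocated cache backed by a maple tree. */
	.cache_type = REGCACHE_MAPLE,
};

Reads then hit the tree via regcache_maple_read() and only reach the bus on a cache miss, just as with the existing rbtree and flat caches.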
BUG_ON(!map->cache_ops); @@ -311,6 +309,8 @@ static int regcache_default_sync(struct regmap *map, unsigned int min, continue; ret = regcache_read(map, reg, &val); + if (ret == -ENOENT) + continue; if (ret) return ret; @@ -349,6 +349,9 @@ int regcache_sync(struct regmap *map) const char *name; bool bypass; + if (WARN_ON(map->cache_type == REGCACHE_NONE)) + return -EINVAL; + BUG_ON(!map->cache_ops); map->lock(map->lock_arg); @@ -418,6 +421,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min, const char *name; bool bypass; + if (WARN_ON(map->cache_type == REGCACHE_NONE)) + return -EINVAL; + BUG_ON(!map->cache_ops); map->lock(map->lock_arg); @@ -672,6 +678,30 @@ static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) return test_bit(idx, cache_present); } +int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val) +{ + int ret; + + if (!regcache_reg_needs_sync(map, reg, val)) + return 0; + + map->cache_bypass = true; + + ret = _regmap_write(map, reg, val); + + map->cache_bypass = false; + + if (ret != 0) { + dev_err(map->dev, "Unable to sync register %#x. %d\n", + reg, ret); + return ret; + } + dev_dbg(map->dev, "Synced register %#x, value %#x\n", + reg, val); + + return 0; +} + static int regcache_sync_block_single(struct regmap *map, void *block, unsigned long *cache_present, unsigned int block_base, @@ -688,21 +718,9 @@ static int regcache_sync_block_single(struct regmap *map, void *block, continue; val = regcache_get_val(map, block, i); - if (!regcache_reg_needs_sync(map, regtmp, val)) - continue; - - map->cache_bypass = true; - - ret = _regmap_write(map, regtmp, val); - - map->cache_bypass = false; - if (ret != 0) { - dev_err(map->dev, "Unable to sync register %#x. %d\n", - regtmp, ret); + ret = regcache_sync_val(map, regtmp, val); + if (ret != 0) return ret; - } - dev_dbg(map->dev, "Synced register %#x, value %#x\n", - regtmp, val); } return 0; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 8c903b8c9714..b99bb2369fff 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -329,8 +329,8 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) } if (d->chip->set_type_config) { - ret = d->chip->set_type_config(d->config_buf, type, - irq_data, reg); + ret = d->chip->set_type_config(d->config_buf, type, irq_data, + reg, d->chip->irq_drv_data); if (ret) return ret; } @@ -433,7 +433,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) * possible in order to reduce the I/O overheads. */ - if (chip->num_main_regs) { + if (chip->no_status) { + /* no status register so default to all active */ + memset32(data->status_buf, GENMASK(31, 0), chip->num_regs); + } else if (chip->num_main_regs) { unsigned int max_main_bits; unsigned long size; @@ -651,13 +654,15 @@ EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear); * @type: The requested IRQ type. * @irq_data: The IRQ being configured. * @idx: Index of the irq's config registers within each array `buf[i]` + * @irq_drv_data: Driver specific IRQ data * * This is a &struct regmap_irq_chip->set_type_config callback suitable for * chips with one config register. Register values are updated according to * the &struct regmap_irq_type data associated with an IRQ. 
*/ int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type, - const struct regmap_irq *irq_data, int idx) + const struct regmap_irq *irq_data, + int idx, void *irq_drv_data) { const struct regmap_irq_type *t = &irq_data->type; @@ -949,12 +954,17 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, continue; /* Ack masked but set interrupts */ - reg = d->get_irq_reg(d, d->chip->status_base, i); - ret = regmap_read(map, reg, &d->status_buf[i]); - if (ret != 0) { - dev_err(map->dev, "Failed to read IRQ status: %d\n", - ret); - goto err_alloc; + if (d->chip->no_status) { + /* no status register so default to all active */ + d->status_buf[i] = GENMASK(31, 0); + } else { + reg = d->get_irq_reg(d, d->chip->status_base, i); + ret = regmap_read(map, reg, &d->status_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to read IRQ status: %d\n", + ret); + goto err_alloc; + } } if (chip->status_invert) diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c new file mode 100644 index 000000000000..f76d41688134 --- /dev/null +++ b/drivers/base/regmap/regmap-kunit.c @@ -0,0 +1,739 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// regmap KUnit tests +// +// Copyright 2023 Arm Ltd + +#include <kunit/test.h> +#include "internal.h" + +#define BLOCK_TEST_SIZE 12 + +static const struct regmap_config test_regmap_config = { + .max_register = BLOCK_TEST_SIZE, + .reg_stride = 1, + .val_bits = sizeof(unsigned int) * 8, +}; + +struct regcache_types { + enum regcache_type type; + const char *name; +}; + +static void case_to_desc(const struct regcache_types *t, char *desc) +{ + strcpy(desc, t->name); +} + +static const struct regcache_types regcache_types_list[] = { + { REGCACHE_NONE, "none" }, + { REGCACHE_FLAT, "flat" }, + { REGCACHE_RBTREE, "rbtree" }, + { REGCACHE_MAPLE, "maple" }, +}; + +KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc); + +static const struct regcache_types real_cache_types_list[] = { + { REGCACHE_FLAT, "flat" }, + { REGCACHE_RBTREE, "rbtree" }, + { REGCACHE_MAPLE, "maple" }, +}; + +KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc); + +static const struct regcache_types sparse_cache_types_list[] = { + { REGCACHE_RBTREE, "rbtree" }, + { REGCACHE_MAPLE, "maple" }, +}; + +KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc); + +static struct regmap *gen_regmap(struct regmap_config *config, + struct regmap_ram_data **data) +{ + unsigned int *buf; + struct regmap *ret; + size_t size = (config->max_register + 1) * sizeof(unsigned int); + int i; + struct reg_default *defaults; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + get_random_bytes(buf, size); + + *data = kzalloc(sizeof(**data), GFP_KERNEL); + if (!(*data)) + return ERR_PTR(-ENOMEM); + (*data)->vals = buf; + + if (config->num_reg_defaults) { + defaults = kcalloc(config->num_reg_defaults, + sizeof(struct reg_default), + GFP_KERNEL); + if (!defaults) + return ERR_PTR(-ENOMEM); + config->reg_defaults = defaults; + + for (i = 0; i < config->num_reg_defaults; i++) { + defaults[i].reg = i * config->reg_stride; + defaults[i].def = buf[i * config->reg_stride]; + } + } + + ret = regmap_init_ram(config, *data); + if (IS_ERR(ret)) { + kfree(buf); + kfree(*data); + } + + return ret; +} + +static void basic_read_write(struct kunit *test) +{ + struct regcache_types *t = (struct regcache_types *)test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + 
unsigned int val, rval; + + config = test_regmap_config; + config.cache_type = t->type; + + map = gen_regmap(&config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + get_random_bytes(&val, sizeof(val)); + + /* If we write a value to a register we can read it back */ + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); + KUNIT_EXPECT_EQ(test, val, rval); + + /* If using a cache the cache satisfied the read */ + KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]); + + regmap_exit(map); +} + +static void bulk_write(struct kunit *test) +{ + struct regcache_types *t = (struct regcache_types *)test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + config.cache_type = t->type; + + map = gen_regmap(&config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + get_random_bytes(&val, sizeof(val)); + + /* + * Data written via the bulk API can be read back with single + * reads. + */ + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val, + BLOCK_TEST_SIZE)); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i])); + + KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); + + /* If using a cache the cache satisfied the read */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); + + regmap_exit(map); +} + +static void bulk_read(struct kunit *test) +{ + struct regcache_types *t = (struct regcache_types *)test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + config.cache_type = t->type; + + map = gen_regmap(&config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + get_random_bytes(&val, sizeof(val)); + + /* Data written as single writes can be read via the bulk API */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i])); + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, + BLOCK_TEST_SIZE)); + KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); + + /* If using a cache the cache satisfied the read */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); + + regmap_exit(map); +} + +static void reg_defaults(struct kunit *test) +{ + struct regcache_types *t = (struct regcache_types *)test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + config.cache_type = t->type; + config.num_reg_defaults = BLOCK_TEST_SIZE; + + map = gen_regmap(&config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + /* Read back the expected default data */ + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, + BLOCK_TEST_SIZE)); + KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); + + /* The data should have been read from cache if there was one */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); +} + +static void reg_defaults_read_dev(struct kunit *test) +{ + struct regcache_types *t = (struct regcache_types *)test->param_value; + struct regmap *map; + 
+
+static void reg_defaults_read_dev(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int rval[BLOCK_TEST_SIZE];
+	int i;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	/* We should have read the cache defaults back from the map */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
+		data->read[i] = false;
+	}
+
+	/* Read back the expected default data */
+	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+						  BLOCK_TEST_SIZE));
+	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+
+	/* The data should have been read from cache if there was one */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+
+	regmap_exit(map);
+}
+
+static void register_patch(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	struct reg_sequence patch[2];
+	unsigned int rval[BLOCK_TEST_SIZE];
+	int i;
+
+	/* We need defaults so readback works */
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	/* Stash the original values */
+	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+						  BLOCK_TEST_SIZE));
+
+	/* Patch a couple of values */
+	patch[0].reg = 2;
+	patch[0].def = rval[2] + 1;
+	patch[0].delay_us = 0;
+	patch[1].reg = 5;
+	patch[1].def = rval[5] + 1;
+	patch[1].delay_us = 0;
+	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
+						       ARRAY_SIZE(patch)));
+
+	/* Only the patched registers are written */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+		switch (i) {
+		case 2:
+		case 5:
+			KUNIT_EXPECT_TRUE(test, data->written[i]);
+			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+			break;
+		default:
+			KUNIT_EXPECT_FALSE(test, data->written[i]);
+			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+			break;
+		}
+	}
+
+	regmap_exit(map);
+}
+
+static void stride(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int rval;
+	int i;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.reg_stride = 2;
+	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	/* Only even registers can be accessed, try both read and write */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+		data->read[i] = false;
+		data->written[i] = false;
+
+		if (i % 2) {
+			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
+			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
+			KUNIT_EXPECT_FALSE(test, data->read[i]);
+			KUNIT_EXPECT_FALSE(test, data->written[i]);
+		} else {
+			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
+			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
+			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
+					data->read[i]);
+
+			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
+			KUNIT_EXPECT_TRUE(test, data->written[i]);
+		}
+	}
+
+	regmap_exit(map);
+}
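For reference, a driver would register such a patch once at setup time; regmap writes it out immediately (bypassing the cache) and replays it after every regcache_sync(). A minimal sketch using the same reg_sequence API the test exercises (device and register names hypothetical):

	static const struct reg_sequence foo_errata_patch[] = {
		{ .reg = 0x02, .def = 0x01 },
		{ .reg = 0x05, .def = 0x80, .delay_us = 10 },
	};

	static int foo_apply_errata(struct regmap *map)
	{
		/* Stored by the core and reapplied after cache syncs */
		return regmap_register_patch(map, foo_errata_patch,
					     ARRAY_SIZE(foo_errata_patch));
	}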
+
+static struct regmap_range_cfg test_range = {
+	.selector_reg = 1,
+	.selector_mask = 0xff,
+
+	.window_start = 4,
+	.window_len = 10,
+
+	.range_min = 20,
+	.range_max = 40,
+};
+
+static bool test_range_volatile(struct device *dev, unsigned int reg)
+{
+	if (reg >= test_range.window_start &&
+	    reg <= test_range.window_start + test_range.window_len)
+		return true;
+
+	if (reg >= test_range.range_min && reg <= test_range.range_max)
+		return true;
+
+	return false;
+}
+
+static void basic_ranges(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int val;
+	int i;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.volatile_reg = test_range_volatile;
+	config.ranges = &test_range;
+	config.num_ranges = 1;
+	config.max_register = test_range.range_max;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	for (i = test_range.range_min; i < test_range.range_max; i++) {
+		data->read[i] = false;
+		data->written[i] = false;
+	}
+
+	/* Reset the page to a non-zero value to trigger a change */
+	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
+					      test_range.range_max));
+
+	/* Check we set the page and use the window for writes */
+	data->written[test_range.selector_reg] = false;
+	data->written[test_range.window_start] = false;
+	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+	data->written[test_range.selector_reg] = false;
+	data->written[test_range.window_start] = false;
+	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
+					      test_range.range_min +
+					      test_range.window_len,
+					      0));
+	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+	/* Same for reads */
+	data->written[test_range.selector_reg] = false;
+	data->read[test_range.window_start] = false;
+	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
+	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+	data->written[test_range.selector_reg] = false;
+	data->read[test_range.window_start] = false;
+	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
+					     test_range.range_min +
+					     test_range.window_len,
+					     &val));
+	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+	/* No physical access triggered in the virtual range */
+	for (i = test_range.range_min; i < test_range.range_max; i++) {
+		KUNIT_EXPECT_FALSE(test, data->read[i]);
+		KUNIT_EXPECT_FALSE(test, data->written[i]);
+	}
+
+	regmap_exit(map);
+}
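test_range above models the common paged-register idiom: a block of virtual registers reached through a small window after writing a page number to a selector register. For comparison, a driver-side window configuration might look like this (all values hypothetical):

	static const struct regmap_range_cfg foo_pages = {
		.name = "foo pages",
		/* Virtual registers 0x100-0x1ff... */
		.range_min = 0x100,
		.range_max = 0x1ff,
		/* ...are reached through a 16-register window at 0x20... */
		.window_start = 0x20,
		.window_len = 0x10,
		/* ...after the page number is written to the selector */
		.selector_reg = 0x1f,
		.selector_mask = 0x0f,
		.selector_shift = 0,
	};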
+
+/* Try to stress dynamic creation of cache data structures */
+static void stress_insert(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int rval, *vals;
+	size_t buf_sz;
+	int i;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.max_register = 300;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	vals = kunit_kcalloc(test, config.max_register, sizeof(*vals),
+			     GFP_KERNEL);
+	KUNIT_ASSERT_FALSE(test, vals == NULL);
+	buf_sz = sizeof(*vals) * config.max_register;
+
+	get_random_bytes(vals, buf_sz);
+
+	/* Write data into the map/cache in ever decreasing strides */
+	for (i = 0; i < config.max_register; i += 100)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+	for (i = 0; i < config.max_register; i += 50)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+	for (i = 0; i < config.max_register; i += 25)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+	for (i = 0; i < config.max_register; i += 10)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+	for (i = 0; i < config.max_register; i += 5)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+	for (i = 0; i < config.max_register; i += 3)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+	for (i = 0; i < config.max_register; i += 2)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+	for (i = 0; i < config.max_register; i++)
+		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
+
+	/* Do reads from the cache (if there is one) match? */
+	for (i = 0; i < config.max_register; i++) {
+		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
+		KUNIT_EXPECT_EQ(test, rval, vals[i]);
+		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+	}
+
+	regmap_exit(map);
+}
+
+static void cache_bypass(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int val, rval;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	get_random_bytes(&val, sizeof(val));
+
+	/* Ensure the cache has a value in it */
+	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
+
+	/* Bypass then write a different value */
+	regcache_cache_bypass(map, true);
+	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
+
+	/* Read the bypassed value */
+	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+	KUNIT_EXPECT_EQ(test, val + 1, rval);
+	KUNIT_EXPECT_EQ(test, data->vals[0], rval);
+
+	/* Disable bypass, the cache should still return the original value */
+	regcache_cache_bypass(map, false);
+	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+	KUNIT_EXPECT_EQ(test, val, rval);
+
+	regmap_exit(map);
+}
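In drivers, regcache_cache_bypass() is typically wrapped around writes that must reach the hardware without being recorded, such as one-shot trigger or calibration registers. A minimal sketch (register names hypothetical):

	static int foo_start_calibration(struct regmap *map)
	{
		int ret;

		/* Reach the hardware directly; the cached value is kept */
		regcache_cache_bypass(map, true);
		ret = regmap_write(map, FOO_CAL_CTRL, FOO_CAL_START);
		regcache_cache_bypass(map, false);

		return ret;
	}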
+
+static void cache_sync(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int val[BLOCK_TEST_SIZE];
+	int i;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	get_random_bytes(&val, sizeof(val));
+
+	/* Put some data into the cache */
+	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
+						   BLOCK_TEST_SIZE));
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		data->written[i] = false;
+
+	/* Trash the data on the device itself then resync */
+	regcache_mark_dirty(map);
+	memset(data->vals, 0, sizeof(val));
+	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+	/* Did we just write the correct data out? */
+	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		KUNIT_EXPECT_EQ(test, true, data->written[i]);
+
+	regmap_exit(map);
+}
+
+static void cache_sync_defaults(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int val;
+	int i;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	get_random_bytes(&val, sizeof(val));
+
+	/* Change the value of one register */
+	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
+
+	/* Resync */
+	regcache_mark_dirty(map);
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		data->written[i] = false;
+	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+	/* Did we just sync the one register we touched? */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
+
+	regmap_exit(map);
+}
+
+static void cache_sync_patch(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	struct reg_sequence patch[2];
+	unsigned int rval[BLOCK_TEST_SIZE], val;
+	int i;
+
+	/* We need defaults so readback works */
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	/* Stash the original values */
+	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+						  BLOCK_TEST_SIZE));
+
+	/* Patch a couple of values */
+	patch[0].reg = 2;
+	patch[0].def = rval[2] + 1;
+	patch[0].delay_us = 0;
+	patch[1].reg = 5;
+	patch[1].def = rval[5] + 1;
+	patch[1].delay_us = 0;
+	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
+						       ARRAY_SIZE(patch)));
+
+	/* Sync the cache */
+	regcache_mark_dirty(map);
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		data->written[i] = false;
+	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+	/* The patch should be on the device but not in the cache */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+		KUNIT_EXPECT_EQ(test, val, rval[i]);
+
+		switch (i) {
+		case 2:
+		case 5:
+			KUNIT_EXPECT_EQ(test, true, data->written[i]);
+			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+			break;
+		default:
+			KUNIT_EXPECT_EQ(test, false, data->written[i]);
+			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+			break;
+		}
+	}
+
+	regmap_exit(map);
+}
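The mark-dirty-then-sync sequence exercised here is the standard power-management pattern: when the device loses state, the driver marks the cache dirty and replays it (including any registered patch). A minimal sketch of a resume handler using it (struct and names hypothetical):

	static int foo_resume(struct device *dev)
	{
		struct foo_priv *foo = dev_get_drvdata(dev);

		/* Power was cut; nothing on the device can be trusted */
		regcache_mark_dirty(foo->regmap);

		/* Replay cached values that differ from the defaults */
		return regcache_sync(foo->regmap);
	}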
+
+static void cache_drop(struct kunit *test)
+{
+	struct regcache_types *t = (struct regcache_types *)test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int rval[BLOCK_TEST_SIZE];
+	int i;
+
+	config = test_regmap_config;
+	config.cache_type = t->type;
+	config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+	map = gen_regmap(&config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	/* Ensure the data is read from the cache */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		data->read[i] = false;
+	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+						  BLOCK_TEST_SIZE));
+	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
+		KUNIT_EXPECT_FALSE(test, data->read[i]);
+		data->read[i] = false;
+	}
+	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+
+	/* Drop some registers */
+	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
+
+	/* Reread and check only the dropped registers hit the device. */
+	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+						  BLOCK_TEST_SIZE));
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
+	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+
+	regmap_exit(map);
+}
+
+static struct kunit_case regmap_test_cases[] = {
+	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
+	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
+	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
+	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
+	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
+	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+	{}
+};
+
+static struct kunit_suite regmap_test_suite = {
+	.name = "regmap",
+	.test_cases = regmap_test_cases,
+};
+kunit_test_suite(regmap_test_suite);
+
+MODULE_LICENSE("GPL v2");
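The three *_gen_params generators named in the case table are defined in the earlier, unshown part of the file; they parameterise each case over the cache types it makes sense for. A sketch of how one such generator is built with KUnit's array-parameter helper, mirroring the regcache_types struct the tests already cast test->param_value to (the exact arrays and descriptor function are assumptions):

	struct regcache_types {
		enum regcache_type type;
		const char *name;
	};

	static void case_to_desc(const struct regcache_types *t, char *desc)
	{
		strcpy(desc, t->name);
	}

	static const struct regcache_types regcache_types_list[] = {
		{ REGCACHE_NONE, "none" },
		{ REGCACHE_FLAT, "flat" },
		{ REGCACHE_RBTREE, "rbtree" },
	};

	/* Generates regcache_types_gen_params() used by KUNIT_CASE_PARAM */
	KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);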
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
new file mode 100644
index 000000000000..85f34a5dee04
--- /dev/null
+++ b/drivers/base/regmap/regmap-ram.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - Memory region
+//
+// This is intended for testing only
+//
+// Copyright (c) 2023, Arm Ltd
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#include "internal.h"
+
+static int regmap_ram_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct regmap_ram_data *data = context;
+
+	data->vals[reg] = val;
+	data->written[reg] = true;
+
+	return 0;
+}
+
+static int regmap_ram_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct regmap_ram_data *data = context;
+
+	*val = data->vals[reg];
+	data->read[reg] = true;
+
+	return 0;
+}
+
+static void regmap_ram_free_context(void *context)
+{
+	struct regmap_ram_data *data = context;
+
+	kfree(data->vals);
+	kfree(data->read);
+	kfree(data->written);
+	kfree(data);
+}
+
+static const struct regmap_bus regmap_ram = {
+	.fast_io = true,
+	.reg_write = regmap_ram_write,
+	.reg_read = regmap_ram_read,
+	.free_context = regmap_ram_free_context,
+};
+
+struct regmap *__regmap_init_ram(const struct regmap_config *config,
+				 struct regmap_ram_data *data,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+	struct regmap *map;
+
+	if (!config->max_register) {
+		pr_crit("No max_register specified for RAM regmap\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	data->read = kcalloc(config->max_register + 1, sizeof(bool),
+			     GFP_KERNEL);
+	if (!data->read)
+		return ERR_PTR(-ENOMEM);
+
+	data->written = kcalloc(config->max_register + 1, sizeof(bool),
+				GFP_KERNEL);
+	if (!data->written)
+		return ERR_PTR(-ENOMEM);
+
+	map = __regmap_init(NULL, &regmap_ram, data, config,
+			    lock_key, lock_name);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(__regmap_init_ram);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 81b0327f719d..09899ae99fc1 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -6,44 +6,53 @@
 #include <linux/module.h>
 #include <linux/regmap.h>
 #include <linux/soundwire/sdw.h>
+#include <linux/types.h>
 #include "internal.h"
 
-static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+static int regmap_sdw_write(void *context, const void *val_buf, size_t val_size)
 {
 	struct device *dev = context;
 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	/* First word of buffer contains the destination address */
+	u32 addr = le32_to_cpu(*(const __le32 *)val_buf);
+	const u8 *val = val_buf;
 
-	return sdw_write_no_pm(slave, reg, val);
+	return sdw_nwrite_no_pm(slave, addr, val_size - sizeof(addr), val + sizeof(addr));
 }
 
-static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+static int regmap_sdw_gather_write(void *context,
+				   const void *reg_buf, size_t reg_size,
+				   const void *val_buf, size_t val_size)
 {
 	struct device *dev = context;
 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
-	int read;
+	u32 addr = le32_to_cpu(*(const __le32 *)reg_buf);
 
-	read = sdw_read_no_pm(slave, reg);
-	if (read < 0)
-		return read;
+	return sdw_nwrite_no_pm(slave, addr, val_size, val_buf);
+}
 
-	*val = read;
-	return 0;
+static int regmap_sdw_read(void *context,
+			   const void *reg_buf, size_t reg_size,
+			   void *val_buf, size_t val_size)
+{
+	struct device *dev = context;
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	u32 addr = le32_to_cpu(*(const __le32 *)reg_buf);
+
+	return sdw_nread_no_pm(slave, addr, val_size, val_buf);
 }
 
 static const struct regmap_bus regmap_sdw = {
-	.reg_read = regmap_sdw_read,
-	.reg_write = regmap_sdw_write,
+	.write = regmap_sdw_write,
+	.gather_write = regmap_sdw_gather_write,
+	.read = regmap_sdw_read,
 	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
 	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
 };
 
 static int regmap_sdw_config_check(const struct regmap_config *config)
 {
-	/* All register are 8-bits wide as per MIPI Soundwire 1.0 Spec */
-	if (config->val_bits != 8)
-		return -ENOTSUPP;
-
-	/* Registers are 32 bits wide */
+	/* Register addresses are 32 bits wide */
 	if (config->reg_bits != 32)
 		return -ENOTSUPP;
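Switching from reg_read/reg_write to the raw read/write/gather_write callbacks means multi-register accesses now go out as single sdw_nread_no_pm()/sdw_nwrite_no_pm() transfers instead of one bus operation per register. The callbacks rely on regmap formatting a little-endian 32-bit address as the first word of the buffer, which the config check enforces. A codec using this bus might configure its regmap roughly like so (max_register value hypothetical), typically via devm_regmap_init_sdw(slave, &cfg) at probe time:

	static const struct regmap_config foo_sdw_regmap_cfg = {
		.reg_bits = 32,	/* required by regmap_sdw_config_check() */
		.val_bits = 8,
		.max_register = 0x4dff,	/* hypothetical */
	};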
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d2a54eb0efd9..db7851f0e3b8 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -814,7 +814,7 @@ struct regmap *__regmap_init(struct device *dev,
 
 	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
 	map->format.pad_bytes = config->pad_bits / 8;
-	map->format.reg_downshift = config->reg_downshift;
+	map->format.reg_shift = config->reg_shift;
 	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
 	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
 			config->val_bits + config->pad_bits, 8);
@@ -1676,6 +1676,18 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
 		buf[i] |= (mask >> (8 * i)) & 0xff;
 }
 
+static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
+{
+	reg += map->reg_base;
+
+	if (map->format.reg_shift > 0)
+		reg >>= map->format.reg_shift;
+	else if (map->format.reg_shift < 0)
+		reg <<= -(map->format.reg_shift);
+
+	return reg;
+}
+
 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 				  const void *val, size_t val_len, bool noinc)
 {
@@ -1753,8 +1765,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 			return ret;
 	}
 
-	reg += map->reg_base;
-	reg >>= map->format.reg_downshift;
+	reg = regmap_reg_addr(map, reg);
 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
 	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
 				      map->write_flag_mask);
@@ -1924,8 +1935,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 			return ret;
 	}
 
-	reg += map->reg_base;
-	reg >>= map->format.reg_downshift;
+	reg = regmap_reg_addr(map, reg);
 	map->format.format_write(map, reg, val);
 
 	trace_regmap_hw_write_start(map, reg, 1);
@@ -1941,9 +1951,17 @@ static int _regmap_bus_reg_write(void *context, unsigned int reg,
 			       unsigned int val)
 {
 	struct regmap *map = context;
+	struct regmap_range_node *range;
+	int ret;
 
-	reg += map->reg_base;
-	reg >>= map->format.reg_downshift;
+	range = _regmap_range_lookup(map, reg);
+	if (range) {
+		ret = _regmap_select_page(map, &reg, range, 1);
+		if (ret != 0)
+			return ret;
+	}
+
+	reg = regmap_reg_addr(map, reg);
 	return map->bus->reg_write(map->bus_context, reg, val);
 }
 
@@ -2494,8 +2512,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 		unsigned int reg = regs[i].reg;
 		unsigned int val = regs[i].def;
 		trace_regmap_hw_write_start(map, reg, 1);
-		reg += map->reg_base;
-		reg >>= map->format.reg_downshift;
+		reg = regmap_reg_addr(map, reg);
 		map->format.format_reg(u8, reg, map->reg_shift);
 		u8 += reg_bytes + pad_bytes;
 		map->format.format_val(u8, val, 0);
@@ -2821,8 +2838,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 			return ret;
 	}
 
-	reg += map->reg_base;
-	reg >>= map->format.reg_downshift;
+	reg = regmap_reg_addr(map, reg);
 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
 	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
 				      map->read_flag_mask);
@@ -2841,9 +2857,17 @@ static int _regmap_bus_reg_read(void *context, unsigned int reg,
 			      unsigned int *val)
 {
 	struct regmap *map = context;
+	struct regmap_range_node *range;
+	int ret;
 
-	reg += map->reg_base;
-	reg >>= map->format.reg_downshift;
+	range = _regmap_range_lookup(map, reg);
+	if (range) {
+		ret = _regmap_select_page(map, &reg, range, 1);
+		if (ret != 0)
+			return ret;
+	}
+
+	reg = regmap_reg_addr(map, reg);
 	return map->bus->reg_read(map->bus_context, reg, val);
 }
 
@@ -3235,8 +3259,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 		*change = false;
 
 	if (regmap_volatile(map, reg) && map->reg_update_bits) {
-		reg += map->reg_base;
-		reg >>= map->format.reg_downshift;
+		reg = regmap_reg_addr(map, reg);
 		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
 		if (ret == 0 && change)
 			*change = true;
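regmap_reg_addr() folds reg_base and the new signed reg_shift into one helper: positive values shift the address down as before, negative values now shift it up, and reg_base is applied first in both cases. For instance, a device indexed in words over a byte-addressed bus might use something like the following (values hypothetical, and assuming the REGMAP_UPSHIFT()/REGMAP_DOWNSHIFT() helpers that accompany reg_shift in regmap.h):

	static const struct regmap_config foo_word_cfg = {
		.reg_bits = 32,
		.val_bits = 32,
		/* Applied before the shift: index N maps to N + 0x1000 */
		.reg_base = 0x1000,
		/*
		 * Up-shift by 2: register index N goes out on the bus
		 * as (N + reg_base) << 2, i.e. 4 bytes per register.
		 */
		.reg_shift = REGMAP_UPSHIFT(2),
	};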
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 0fb1d4ab9d8a..8dec5228fde3 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -7,6 +7,7 @@
 
 #include <linux/sysfs.h>
 #include <linux/init.h>
+#include <linux/of.h>
 #include <linux/stat.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
@@ -110,6 +111,18 @@ static void soc_release(struct device *dev)
 	kfree(soc_dev);
 }
 
+static void soc_device_get_machine(struct soc_device_attribute *soc_dev_attr)
+{
+	struct device_node *np;
+
+	if (soc_dev_attr->machine)
+		return;
+
+	np = of_find_node_by_path("/");
+	of_property_read_string(np, "model", &soc_dev_attr->machine);
+	of_node_put(np);
+}
+
 static struct soc_device_attribute *early_soc_dev_attr;
 
 struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
@@ -118,6 +131,8 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
 	const struct attribute_group **soc_attr_groups;
 	int ret;
 
+	soc_device_get_machine(soc_dev_attr);
+
 	if (!soc_bus_registered) {
 		if (early_soc_dev_attr)
 			return ERR_PTR(-EBUSY);
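With soc_device_get_machine() filling in a missing .machine from the devicetree root "model" property, DT-based SoC identification drivers no longer need to read that property themselves. A minimal sketch of a registration that now relies on the fallback (driver and family names hypothetical):

	static int foo_soc_probe(struct platform_device *pdev)
	{
		struct soc_device_attribute *attr;
		struct soc_device *soc_dev;

		attr = devm_kzalloc(&pdev->dev, sizeof(*attr), GFP_KERNEL);
		if (!attr)
			return -ENOMEM;

		attr->family = "Foo SoC";
		/* .machine left NULL: taken from the DT root "model" */

		soc_dev = soc_device_register(attr);
		if (IS_ERR(soc_dev))
			return PTR_ERR(soc_dev);

		platform_set_drvdata(pdev, soc_dev);
		return 0;
	}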