Diffstat (limited to 'drivers/base')
26 files changed, 1214 insertions, 303 deletions
diff --git a/drivers/base/base.h b/drivers/base/base.h index 7a419a7a6235..b405436ee28e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -60,12 +60,17 @@ struct driver_private { * @knode_parent - node in sibling list * @knode_driver - node in driver list * @knode_bus - node in bus list + * @knode_class - node in class list * @deferred_probe - entry in deferred_probe_list which is used to retry the * binding of drivers which were unable to get all the resources needed by * the device; typically because it depends on another driver getting * probed first. + * @async_driver - pointer to device driver awaiting probe via async_probe * @device - pointer back to the struct device that this structure is * associated with. + * @dead - This device is currently either in the process of or has been + * removed from the system. Any asynchronous events scheduled for this + * device should exit without taking any action. * * Nothing outside of the driver core should ever touch these fields. */ @@ -74,8 +79,11 @@ struct device_private { struct klist_node knode_parent; struct klist_node knode_driver; struct klist_node knode_bus; + struct klist_node knode_class; struct list_head deferred_probe; + struct device_driver *async_driver; struct device *device; + u8 dead:1; }; #define to_device_private_parent(obj) \ container_of(obj, struct device_private, knode_parent) @@ -83,6 +91,8 @@ struct device_private { container_of(obj, struct device_private, knode_driver) #define to_device_private_bus(obj) \ container_of(obj, struct device_private, knode_bus) +#define to_device_private_class(obj) \ + container_of(obj, struct device_private, knode_class) /* initialisation functions */ extern int devices_init(void); @@ -124,6 +134,8 @@ extern int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups); extern void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); +int device_driver_attach(struct device_driver *drv, struct device *dev); +void device_driver_detach(struct device *dev); extern char *make_class_name(const char *name, struct kobject *kobj); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index e06a57936cc9..0a58e969f8b7 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -187,11 +187,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + device_driver_detach(dev); err = count; } put_device(dev); @@ -214,13 +210,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { - if (dev->parent && bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - err = driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && bus->need_parent_lock) - device_unlock(dev->parent); + err = device_driver_attach(drv, dev); if (err > 0) { /* success */ @@ -236,12 +226,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, } static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); -static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) +static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf) { return sprintf(buf, "%d\n", 
bus->p->drivers_autoprobe); } -static ssize_t store_drivers_autoprobe(struct bus_type *bus, +static ssize_t drivers_autoprobe_store(struct bus_type *bus, const char *buf, size_t count) { if (buf[0] == '0') @@ -251,7 +241,7 @@ static ssize_t store_drivers_autoprobe(struct bus_type *bus, return count; } -static ssize_t store_drivers_probe(struct bus_type *bus, +static ssize_t drivers_probe_store(struct bus_type *bus, const char *buf, size_t count) { struct device *dev; @@ -586,9 +576,8 @@ static void remove_bind_files(struct device_driver *drv) driver_remove_file(drv, &driver_attr_unbind); } -static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe); -static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO, - show_drivers_autoprobe, store_drivers_autoprobe); +static BUS_ATTR_WO(drivers_probe); +static BUS_ATTR_RW(drivers_autoprobe); static int add_probe_files(struct bus_type *bus) { @@ -621,17 +610,6 @@ static ssize_t uevent_store(struct device_driver *drv, const char *buf, } static DRIVER_ATTR_WO(uevent); -static void driver_attach_async(void *_drv, async_cookie_t cookie) -{ - struct device_driver *drv = _drv; - int ret; - - ret = driver_attach(drv); - - pr_debug("bus: '%s': driver %s async attach completed: %d\n", - drv->bus->name, drv->name, ret); -} - /** * bus_add_driver - Add a driver to the bus. * @drv: driver. @@ -664,15 +642,9 @@ int bus_add_driver(struct device_driver *drv) klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); if (drv->bus->p->drivers_autoprobe) { - if (driver_allows_async_probing(drv)) { - pr_debug("bus: '%s': probing driver %s asynchronously\n", - drv->bus->name, drv->name); - async_schedule(driver_attach_async, drv); - } else { - error = driver_attach(drv); - if (error) - goto out_unregister; - } + error = driver_attach(drv); + if (error) + goto out_unregister; } module_add_driver(drv->owner, drv); @@ -774,13 +746,8 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices); */ int device_reprobe(struct device *dev) { - if (dev->driver) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); - } + if (dev->driver) + device_driver_detach(dev); return bus_rescan_devices_helper(dev, NULL); } EXPORT_SYMBOL_GPL(device_reprobe); @@ -838,7 +805,14 @@ static ssize_t bus_uevent_store(struct bus_type *bus, rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); return rc ? rc : count; } -static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); +/* + * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO() + * here, but can not use it as earlier in the file we have + * DRIVER_ATTR_WO(uevent), which would cause a clash with the store + * function name.
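The show/store handlers above are renamed to the <name>_show()/<name>_store() pattern because that is what the BUS_ATTR_RW()/BUS_ATTR_WO() helpers expect. A minimal sketch of the convention; the foo attribute and its backing variable are made up for illustration:

#include <linux/device.h>
#include <linux/kernel.h>

static unsigned int foo_value;  /* illustrative backing state */

/* BUS_ATTR_RW(foo) references foo_show()/foo_store() and emits bus_attr_foo */
static ssize_t foo_show(struct bus_type *bus, char *buf)
{
        return sprintf(buf, "%u\n", foo_value);
}

static ssize_t foo_store(struct bus_type *bus, const char *buf, size_t count)
{
        if (kstrtouint(buf, 0, &foo_value))
                return -EINVAL;
        return count;
}
static BUS_ATTR_RW(foo);

This naming requirement is also why the bus uevent attribute below has to open-code __ATTR(): a bus-level uevent_store() would collide with the driver-level uevent_store() defined earlier in the file.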
+ */ +static struct bus_attribute bus_attr_uevent = __ATTR(uevent, S_IWUSR, NULL, + bus_uevent_store); /** * bus_register - register a driver-core subsystem diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf78fa6d470d..a7359535caf5 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].size_prop; - if (of_property_read_u32(np, propname, &this_leaf->size)) - this_leaf->size = 0; + of_property_read_u32(np, propname, &this_leaf->size); } /* not cache_line_size() because that's a macro in include/linux/cache.h */ @@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].nr_sets_prop; - if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) - this_leaf->number_of_sets = 0; + of_property_read_u32(np, propname, &this_leaf->number_of_sets); } static void cache_associativity(struct cacheinfo *this_leaf) diff --git a/drivers/base/class.c b/drivers/base/class.c index 54def4e02f00..d8a6a5864c2e 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -117,16 +117,22 @@ static void class_put(struct class *cls) kset_put(&cls->p->subsys); } +static struct device *klist_class_to_dev(struct klist_node *n) +{ + struct device_private *p = to_device_private_class(n); + return p->device; +} + static void klist_class_dev_get(struct klist_node *n) { - struct device *dev = container_of(n, struct device, knode_class); + struct device *dev = klist_class_to_dev(n); get_device(dev); } static void klist_class_dev_put(struct klist_node *n) { - struct device *dev = container_of(n, struct device, knode_class); + struct device *dev = klist_class_to_dev(n); put_device(dev); } @@ -277,7 +283,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, struct klist_node *start_knode = NULL; if (start) - start_knode = &start->knode_class; + start_knode = &start->p->knode_class; klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); iter->type = type; } @@ -304,7 +310,7 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter) knode = klist_next(&iter->ki); if (!knode) return NULL; - dev = container_of(knode, struct device, knode_class); + dev = klist_class_to_dev(knode); if (!iter->type || iter->type == dev->type) return dev; } diff --git a/drivers/base/component.c b/drivers/base/component.c index ddcea8739c12..532a3a5d8f63 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -16,11 +16,38 @@ #include <linux/slab.h> #include <linux/debugfs.h> +/** + * DOC: overview + * + * The component helper allows drivers to collect a pile of sub-devices, + * including their bound drivers, into an aggregate driver. Various subsystems + * already provide functions to get hold of such components, e.g. + * of_clk_get_by_name(). The component helper can be used when such a + * subsystem-specific way to find a device is not available: The component + * helper fills the niche of aggregate drivers for specific hardware, where + * further standardization into a subsystem would not be practical. The common + * example is when a logical device (e.g. a DRM display driver) is spread around + * the SoC on various components (scanout engines, blending blocks, transcoders + * for various outputs and so on). 
+ * + * The component helper also doesn't solve runtime dependencies, e.g. for system + * suspend and resume operations. See also :ref:`device links<device_link>`. + * + * Components are registered using component_add() and unregistered with + * component_del(), usually from the driver's probe and disconnect functions. + * + * Aggregate drivers first assemble a component match list of what they need + * using component_match_add(). This is then registered as an aggregate driver + * using component_master_add_with_match(), and unregistered using + * component_master_del(). + */ + struct component; struct component_match_array { void *data; int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); void (*release)(struct device *, void *); struct component *component; bool duplicate; @@ -48,6 +75,7 @@ struct component { bool bound; const struct component_ops *ops; + int subcomponent; struct device *dev; }; @@ -132,7 +160,7 @@ static struct master *__master_find(struct device *dev, } static struct component *find_component(struct master *master, - int (*compare)(struct device *, void *), void *compare_data) + struct component_match_array *mc) { struct component *c; @@ -140,7 +168,11 @@ static struct component *find_component(struct master *master, if (c->master && c->master != master) continue; - if (compare(c->dev, compare_data)) + if (mc->compare && mc->compare(c->dev, mc->data)) + return c; + + if (mc->compare_typed && + mc->compare_typed(c->dev, c->subcomponent, mc->data)) return c; } @@ -166,7 +198,7 @@ static int find_components(struct master *master) if (match->compare[i].component) continue; - c = find_component(master, mc->compare, mc->data); + c = find_component(master, mc); if (!c) { ret = -ENXIO; break; @@ -301,15 +333,12 @@ static int component_match_realloc(struct device *dev, return 0; } -/* - * Add a component to be matched, with a release function. - * - * The match array is first created or extended if necessary. - */ -void component_match_add_release(struct device *master, +static void __component_match_add(struct device *master, struct component_match **matchptr, void (*release)(struct device *, void *), - int (*compare)(struct device *, void *), void *compare_data) + int (*compare)(struct device *, void *), + int (*compare_typed)(struct device *, int, void *), + void *compare_data) { struct component_match *match = *matchptr; @@ -341,13 +370,69 @@ void component_match_add_release(struct device *master, } match->compare[match->num].compare = compare; + match->compare[match->num].compare_typed = compare_typed; match->compare[match->num].release = release; match->compare[match->num].data = compare_data; match->compare[match->num].component = NULL; match->num++; } + +/** + * component_match_add_release - add a component match entry with release callback + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @release: release function for @compare_data + * @compare: compare function to match against all components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add(). 
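To make the assembly flow from the overview concrete, here is a minimal sketch of an aggregate driver collecting its components and of a component registering itself. The foo_* names and the stub bind/unbind callbacks are illustrative assumptions, not part of this change, and foo_component_ops (a &struct component_ops with its own bind/unbind) is assumed to be defined alongside; only the component_* calls are the real API:

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int compare_of(struct device *dev, void *data)
{
        return dev->of_node == data;
}

static void release_of(struct device *dev, void *data)
{
        of_node_put(data);
}

static int foo_master_bind(struct device *dev)
{
        /* all components are available; bind them into the aggregate */
        return component_bind_all(dev, NULL);
}

static void foo_master_unbind(struct device *dev)
{
        component_unbind_all(dev, NULL);
}

static const struct component_master_ops foo_master_ops = {
        .bind   = foo_master_bind,
        .unbind = foo_master_unbind,
};

/* Aggregate driver: build the match list, then register it. */
static int foo_master_probe(struct platform_device *pdev)
{
        struct component_match *match = NULL;   /* must start out NULL */
        struct device_node *child;

        for_each_available_child_of_node(pdev->dev.of_node, child)
                component_match_add_release(&pdev->dev, &match, release_of,
                                            compare_of, of_node_get(child));

        return component_master_add_with_match(&pdev->dev, &foo_master_ops,
                                               match);
}

/* Component driver: register the sub-device from its own probe(). */
static int foo_component_probe(struct platform_device *pdev)
{
        return component_add(&pdev->dev, &foo_component_ops);
}

Once every entry in the match list has a matching registered component, the core calls foo_master_ops.bind, which in turn calls component_bind_all().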
+ * + * The allocated match list in @matchptr is automatically released using devm + * actions, whereupon @release will be called to free any references held by + * @compare_data, e.g. when @compare_data is a &device_node that must be + * released with of_node_put(). + * + * See also component_match_add() and component_match_add_typed(). + */ +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), + int (*compare)(struct device *, void *), void *compare_data) +{ + __component_match_add(master, matchptr, release, compare, NULL, + compare_data); +} EXPORT_SYMBOL(component_match_add_release); +/** + * component_match_add_typed - add a component match entry for a typed component + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @compare_typed: compare function to match against all typed components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add_typed(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions. + * + * See also component_match_add() and component_match_add_release(). + */ +void component_match_add_typed(struct device *master, + struct component_match **matchptr, + int (*compare_typed)(struct device *, int, void *), void *compare_data) +{ + __component_match_add(master, matchptr, NULL, NULL, compare_typed, + compare_data); +} +EXPORT_SYMBOL(component_match_add_typed); + static void free_master(struct master *master) { struct component_match *match = master->match; @@ -367,6 +452,18 @@ static void free_master(struct master *master) kfree(master); } +/** + * component_master_add_with_match - register an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * @match: component match list for the aggregate driver + * + * Registers a new aggregate driver consisting of the components added to @match + * by calling one of the component_match_add() functions. Once all components in + * @match are available, it will be assembled by calling + * &component_master_ops.bind from @ops. Must be unregistered by calling + * component_master_del(). + */ int component_master_add_with_match(struct device *dev, const struct component_master_ops *ops, struct component_match *match) @@ -403,6 +500,15 @@ int component_master_add_with_match(struct device *dev, } EXPORT_SYMBOL_GPL(component_master_add_with_match); +/** + * component_master_del - unregister an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * + * Unregisters an aggregate driver registered with + * component_master_add_with_match(). If necessary the aggregate driver is first + * disassembled by calling &component_master_ops.unbind from @ops.
+ */ void component_master_del(struct device *dev, const struct component_master_ops *ops) { @@ -430,6 +536,15 @@ static void component_unbind(struct component *component, devres_release_group(component->dev, component); } +/** + * component_unbind_all - unbind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Unbinds all components of the aggregate @dev by passing @data to their + * &component_ops.unbind functions. Should be called from + * &component_master_ops.unbind. + */ void component_unbind_all(struct device *master_dev, void *data) { struct master *master; @@ -503,6 +618,15 @@ static int component_bind(struct component *component, struct master *master, return ret; } +/** + * component_bind_all - bind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Binds all components of the aggregate @dev by passing @data to their + * &component_ops.bind functions. Should be called from + * &component_master_ops.bind. + */ int component_bind_all(struct device *master_dev, void *data) { struct master *master; @@ -537,7 +661,8 @@ int component_bind_all(struct device *master_dev, void *data) } EXPORT_SYMBOL_GPL(component_bind_all); -int component_add(struct device *dev, const struct component_ops *ops) +static int __component_add(struct device *dev, const struct component_ops *ops, + int subcomponent) { struct component *component; int ret; @@ -548,6 +673,7 @@ int component_add(struct device *dev, const struct component_ops *ops) component->ops = ops; component->dev = dev; + component->subcomponent = subcomponent; dev_dbg(dev, "adding component (ops %ps)\n", ops); @@ -566,8 +692,66 @@ int component_add(struct device *dev, const struct component_ops *ops) return ret < 0 ? ret : 0; } + +/** + * component_add_typed - register a component + * @dev: component device + * @ops: component callbacks + * @subcomponent: nonzero identifier for subcomponents + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * @subcomponent must be nonzero and is used to differentiate between multiple + * components registered on the same device @dev. These components are matched + * using component_match_add_typed(). + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add(). + */ +int component_add_typed(struct device *dev, const struct component_ops *ops, + int subcomponent) +{ + if (WARN_ON(subcomponent == 0)) + return -EINVAL; + + return __component_add(dev, ops, subcomponent); +} +EXPORT_SYMBOL_GPL(component_add_typed); + +/** + * component_add - register a component + * @dev: component device + * @ops: component callbacks + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add_typed() for a variant that allows multiple different + * components on the same device.
+ */ +int component_add(struct device *dev, const struct component_ops *ops) +{ + return __component_add(dev, ops, 0); +} EXPORT_SYMBOL_GPL(component_add); +/** + * component_del - unregister a component + * @dev: component device + * @ops: component callbacks + * + * Unregister a component added with component_add(). If the component is bound + * into an aggregate driver, this will force the entire aggregate driver, including + * all its components, to be unbound. + */ void component_del(struct device *dev, const struct component_ops *ops) { struct component *c, *component = NULL; diff --git a/drivers/base/core.c b/drivers/base/core.c index 0073b09bb99f..4aeaa0c92bda 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -179,10 +179,31 @@ void device_pm_move_to_tail(struct device *dev) * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be * ignored. * - * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed - * automatically when the consumer device driver unbinds from it. - * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS - * set is invalid and will cause NULL to be returned. + * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by + * the driver core and, in particular, the caller of this function is expected + * to drop the reference to the link acquired by it directly. + * + * If that flag is not set, however, the caller of this function is handing the + * management of the link over to the driver core entirely and its return value + * can only be used to check whether or not the link is present. In that case, + * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link + * flags can be used to indicate to the driver core when the link can be safely + * deleted. Namely, setting one of them in @flags indicates to the driver core + * that the link is not going to be used (by the given caller of this function) + * after unbinding the consumer or supplier driver, respectively, from its + * device, so the link can be deleted at that point. If none of them is set, + * the link will be maintained until one of the devices pointed to by it (either + * the consumer or the supplier) is unregistered. + * + * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and + * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent + * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can + * be used to request the driver core to automatically probe for a consumer + * driver after successfully binding a driver to the supplier device. + * + * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER + * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and + * will cause NULL to be returned upfront.
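As a usage sketch of the managed-link rules spelled out here (the foo_* naming is hypothetical; the supplier pointer is assumed to have been resolved already):

#include <linux/device.h>

static int foo_link_to_supplier(struct device *consumer, struct device *supplier)
{
        struct device_link *link;

        /* managed link: the core drops it when the consumer driver unbinds */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE |
                               DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return -EINVAL; /* invalid flag combination or allocation failure */

        return 0;
}

Because DL_FLAG_STATELESS is not set, the return value is only checked for presence; the driver core manages the link's lifetime from that point on.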
* * A side effect of the link creation is re-ordering of dpm_list and the * devices_kset list by moving the consumer device and all devices depending @@ -199,10 +220,22 @@ struct device_link *device_link_add(struct device *consumer, struct device_link *link; if (!consumer || !supplier || - ((flags & DL_FLAG_STATELESS) && - (flags & DL_FLAG_AUTOREMOVE_CONSUMER))) + (flags & DL_FLAG_STATELESS && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_AUTOPROBE_CONSUMER)) || + (flags & DL_FLAG_AUTOPROBE_CONSUMER && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER))) return NULL; + if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { + if (pm_runtime_get_sync(supplier) < 0) { + pm_runtime_put_noidle(supplier); + return NULL; + } + } + device_links_write_lock(); device_pm_lock(); @@ -217,35 +250,71 @@ struct device_link *device_link_add(struct device *consumer, goto out; } - list_for_each_entry(link, &supplier->links.consumers, s_node) - if (link->consumer == consumer) { + /* + * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed + * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both + * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + + list_for_each_entry(link, &supplier->links.consumers, s_node) { + if (link->consumer != consumer) + continue; + + /* + * Don't return a stateless link if the caller wants a stateful + * one and vice versa. + */ + if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) { + link = NULL; + goto out; + } + + if (flags & DL_FLAG_PM_RUNTIME) { + if (!(link->flags & DL_FLAG_PM_RUNTIME)) { + pm_runtime_new_link(consumer); + link->flags |= DL_FLAG_PM_RUNTIME; + } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + } + + if (flags & DL_FLAG_STATELESS) { kref_get(&link->kref); goto out; } + /* + * If the life time of the link following from the new flags is + * longer than indicated by the flags of the existing link, + * update the existing link to stay around longer. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { + if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; + } + } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { + link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER); + } + goto out; + } + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) goto out; + refcount_set(&link->rpm_active, 1); + if (flags & DL_FLAG_PM_RUNTIME) { - if (flags & DL_FLAG_RPM_ACTIVE) { - if (pm_runtime_get_sync(supplier) < 0) { - pm_runtime_put_noidle(supplier); - kfree(link); - link = NULL; - goto out; - } - link->rpm_active = true; - } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + pm_runtime_new_link(consumer); - /* - * If the link is being added by the consumer driver at probe - * time, balance the decrementation of the supplier's runtime PM - * usage counter after consumer probe in driver_probe_device(). 
- */ - if (consumer->links.status == DL_DEV_PROBING) - pm_runtime_get_noresume(supplier); } + get_device(supplier); link->supplier = supplier; INIT_LIST_HEAD(&link->s_node); @@ -260,17 +329,26 @@ struct device_link *device_link_add(struct device *consumer, link->status = DL_STATE_NONE; } else { switch (supplier->links.status) { - case DL_DEV_DRIVER_BOUND: + case DL_DEV_PROBING: switch (consumer->links.status) { case DL_DEV_PROBING: /* - * Some callers expect the link creation during - * consumer driver probe to resume the supplier - * even without DL_FLAG_RPM_ACTIVE. + * A consumer driver can create a link to a + * supplier that has not completed its probing + * yet as long as it knows that the supplier is + * already functional (for example, it has just + * acquired some resources from the supplier). */ - if (flags & DL_FLAG_PM_RUNTIME) - pm_runtime_resume(supplier); - + link->status = DL_STATE_CONSUMER_PROBE; + break; + default: + link->status = DL_STATE_DORMANT; + break; + } + break; + case DL_DEV_DRIVER_BOUND: + switch (consumer->links.status) { + case DL_DEV_PROBING: link->status = DL_STATE_CONSUMER_PROBE; break; case DL_DEV_DRIVER_BOUND: @@ -291,6 +369,14 @@ struct device_link *device_link_add(struct device *consumer, } /* + * Some callers expect the link creation during consumer driver probe to + * resume the supplier even without DL_FLAG_RPM_ACTIVE. + */ + if (link->status == DL_STATE_CONSUMER_PROBE && + flags & DL_FLAG_PM_RUNTIME) + pm_runtime_resume(supplier); + + /* * Move the consumer and all of the devices depending on it to the end * of dpm_list and the devices_kset list. * @@ -302,17 +388,24 @@ struct device_link *device_link_add(struct device *consumer, list_add_tail_rcu(&link->s_node, &supplier->links.consumers); list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); - dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); + dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); out: device_pm_unlock(); device_links_write_unlock(); + + if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) + pm_runtime_put(supplier); + return link; } EXPORT_SYMBOL_GPL(device_link_add); static void device_link_free(struct device_link *link) { + while (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + put_device(link->consumer); put_device(link->supplier); kfree(link); @@ -328,8 +421,8 @@ static void __device_link_del(struct kref *kref) { struct device_link *link = container_of(kref, struct device_link, kref); - dev_info(link->consumer, "Dropping the link to %s\n", - dev_name(link->supplier)); + dev_dbg(link->consumer, "Dropping the link to %s\n", + dev_name(link->supplier)); if (link->flags & DL_FLAG_PM_RUNTIME) pm_runtime_drop_link(link->consumer); @@ -355,8 +448,16 @@ static void __device_link_del(struct kref *kref) } #endif /* !CONFIG_SRCU */ +static void device_link_put_kref(struct device_link *link) +{ + if (link->flags & DL_FLAG_STATELESS) + kref_put(&link->kref, __device_link_del); + else + WARN(1, "Unable to drop a managed device link reference\n"); +} + /** - * device_link_del - Delete a link between two devices. + * device_link_del - Delete a stateless link between two devices. * @link: Device link to delete. 
* * The caller must ensure proper synchronization of this function with runtime @@ -368,14 +469,14 @@ void device_link_del(struct device_link *link) { device_links_write_lock(); device_pm_lock(); - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); device_pm_unlock(); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_del); /** - * device_link_remove - remove a link between two devices. + * device_link_remove - Delete a stateless link between two devices. * @consumer: Consumer end of the link. * @supplier: Supplier end of the link. * @@ -394,7 +495,7 @@ void device_link_remove(void *consumer, struct device *supplier) list_for_each_entry(link, &supplier->links.consumers, s_node) { if (link->consumer == consumer) { - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); break; } } @@ -474,8 +575,21 @@ void device_links_driver_bound(struct device *dev) if (link->flags & DL_FLAG_STATELESS) continue; + /* + * Links created during consumer probe may be in the "consumer + * probe" state to start with if the supplier is still probing + * when they are created and they may become "active" if the + * consumer probe returns first. Skip them here. + */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + continue; + WARN_ON(link->status != DL_STATE_DORMANT); WRITE_ONCE(link->status, DL_STATE_AVAILABLE); + + if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) + driver_deferred_probe_add(link->consumer); } list_for_each_entry(link, &dev->links.suppliers, c_node) { @@ -512,18 +626,49 @@ static void __device_links_no_driver(struct device *dev) continue; if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) - kref_put(&link->kref, __device_link_del); - else if (link->status != DL_STATE_SUPPLIER_UNBIND) + __device_link_del(&link->kref); + else if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } dev->links.status = DL_DEV_NO_DRIVER; } +/** + * device_links_no_driver - Update links after failing driver probe. + * @dev: Device whose driver has just failed to probe. + * + * Clean up leftover links to consumers for @dev and invoke + * %__device_links_no_driver() to update links to suppliers for it as + * appropriate. + * + * Links with the DL_FLAG_STATELESS flag set are ignored. + */ void device_links_no_driver(struct device *dev) { + struct device_link *link; + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (link->flags & DL_FLAG_STATELESS) + continue; + + /* + * The probe has failed, so if the status of the link is + * "consumer probe" or "active", it must have been added by + * a probing consumer while this device was still probing. + * Change its state to "dormant", as it represents a valid + * relationship, but it is not functionally meaningful. 
+ */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + __device_links_no_driver(dev); + device_links_write_unlock(); } @@ -539,11 +684,11 @@ void device_links_no_driver(struct device *dev) */ void device_links_driver_cleanup(struct device *dev) { - struct device_link *link; + struct device_link *link, *ln; device_links_write_lock(); - list_for_each_entry(link, &dev->links.consumers, s_node) { + list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { if (link->flags & DL_FLAG_STATELESS) continue; @@ -557,7 +702,7 @@ void device_links_driver_cleanup(struct device *dev) */ if (link->status == DL_STATE_SUPPLIER_UNBIND && link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) - kref_put(&link->kref, __device_link_del); + __device_link_del(&link->kref); WRITE_ONCE(link->status, DL_STATE_DORMANT); } @@ -1966,7 +2111,7 @@ int device_add(struct device *dev) if (dev->class) { mutex_lock(&dev->class->p->mutex); /* tie the class to the device */ - klist_add_tail(&dev->knode_class, + klist_add_tail(&dev->p->knode_class, &dev->class->p->klist_devices); /* notify any interfaces that the device is here */ @@ -2080,6 +2225,17 @@ void device_del(struct device *dev) struct kobject *glue_dir = NULL; struct class_interface *class_intf; + /* + * Hold the device lock and set the "dead" flag to guarantee that + * the update behavior is consistent with the other bitfields near + * it and that we cannot have an asynchronous probe routine trying + * to run while we are tearing out the bus/class/sysfs from + * underneath the device. + */ + device_lock(dev); + dev->p->dead = true; + device_unlock(dev); + /* Notify clients of device removal. This call must come * before dpm_sysfs_remove(). */ @@ -2105,7 +2261,7 @@ void device_del(struct device *dev) if (class_intf->remove_dev) class_intf->remove_dev(dev, class_intf); /* remove the device from the class list */ - klist_del(&dev->knode_class); + klist_del(&dev->p->knode_class); mutex_unlock(&dev->class->p->mutex); } device_remove_file(dev, &dev_attr_uevent); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index eb9443d5bae1..668139cfa664 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -409,6 +409,7 @@ static void device_create_release(struct device *dev) kfree(dev); } +__printf(4, 0) static struct device * __cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, @@ -427,6 +428,7 @@ __cpu_device_create(struct device *parent, void *drvdata, dev->parent = parent; dev->groups = groups; dev->release = device_create_release; + device_set_pm_not_required(dev); dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 8ac10af17c00..a823f469e53f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -57,6 +57,10 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0); static struct dentry *deferred_devices; static bool initcalls_done; +/* Save the async probe drivers' name from kernel cmdline */ +#define ASYNC_DRV_NAMES_MAX_LEN 256 +static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN]; + /* * In some cases, like suspend to RAM or hibernation, It might be reasonable * to prohibit probing of devices as it could be unsafe. 
@@ -116,7 +120,7 @@ static void deferred_probe_work_func(struct work_struct *work) } static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func); -static void driver_deferred_probe_add(struct device *dev) +void driver_deferred_probe_add(struct device *dev) { mutex_lock(&deferred_probe_mutex); if (list_empty(&dev->p->deferred_probe)) { @@ -674,6 +678,23 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) return ret; } +static inline bool cmdline_requested_async_probing(const char *drv_name) +{ + return parse_option_str(async_probe_drv_names, drv_name); +} + +/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */ +static int __init save_async_options(char *buf) +{ + if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN) + printk(KERN_WARNING + "Too long list of driver names for 'driver_async_probe'!\n"); + + strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); + return 0; +} +__setup("driver_async_probe=", save_async_options); + bool driver_allows_async_probing(struct device_driver *drv) { switch (drv->probe_type) { @@ -684,6 +705,9 @@ bool driver_allows_async_probing(struct device_driver *drv) return false; default: + if (cmdline_requested_async_probing(drv->name)) + return true; + if (module_requested_async_probing(drv->owner)) return true; @@ -731,15 +755,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) bool async_allowed; int ret; - /* - * Check if device has already been claimed. This may - * happen with driver loading, device discovery/registration, - * and deferred probe processing happens all at once with - * multiple threads. - */ - if (dev->driver) - return -EBUSY; - ret = driver_match_device(drv, dev); if (ret == 0) { /* no match */ @@ -774,6 +789,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) device_lock(dev); + /* + * Check if device has already been removed or claimed. This may + * happen with driver loading, device discovery/registration, + * and deferred probe processing happens all at once with + * multiple threads. + */ + if (dev->p->dead || dev->driver) + goto out_unlock; + if (dev->parent) pm_runtime_get_sync(dev->parent); @@ -784,7 +808,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) if (dev->parent) pm_runtime_put(dev->parent); - +out_unlock: device_unlock(dev); put_device(dev); @@ -829,7 +853,7 @@ static int __device_attach(struct device *dev, bool allow_async) */ dev_dbg(dev, "scheduling asynchronous probe\n"); get_device(dev); - async_schedule(__device_attach_async_helper, dev); + async_schedule_dev(__device_attach_async_helper, dev); } else { pm_request_idle(dev); } @@ -867,6 +891,88 @@ void device_initial_probe(struct device *dev) __device_attach(dev, true); } +/* + * __device_driver_lock - acquire locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will take the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a USB + * interface, @parent lock will be held as well. + */ +static void __device_driver_lock(struct device *dev, struct device *parent) +{ + if (parent && dev->bus->need_parent_lock) + device_lock(parent); + device_lock(dev); +} + +/* + * __device_driver_unlock - release locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. 
Needed if the bus requires parent lock + * + * This function will release the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a + * USB interface, @parent lock will be released as well. + */ +static void __device_driver_unlock(struct device *dev, struct device *parent) +{ + device_unlock(dev); + if (parent && dev->bus->need_parent_lock) + device_unlock(parent); +} + +/** + * device_driver_attach - attach a specific driver to a specific device + * @drv: Driver to attach + * @dev: Device to attach it to + * + * Manually attach driver to a device. Will acquire both @dev lock and + * @dev->parent lock if needed. + */ +int device_driver_attach(struct device_driver *drv, struct device *dev) +{ + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + return ret; +} + +static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) +{ + struct device *dev = _dev; + struct device_driver *drv; + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + drv = dev->p->async_driver; + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret); + + put_device(dev); +} + static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; @@ -894,14 +1000,26 @@ static int __driver_attach(struct device *dev, void *data) return ret; } /* ret > 0 means positive match */ - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - if (!dev->driver) - driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + if (driver_allows_async_probing(drv)) { + /* + * Instead of probing the device synchronously we will + * probe it asynchronously to allow for more parallelism.
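For reference, drivers reach this asynchronous path either by declaring it themselves or, via the driver_async_probe= parameter added above, from the kernel command line. A minimal sketch of the static opt-in; foo_driver and its empty probe routine are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        return 0;       /* real device setup elided */
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .driver = {
                .name           = "foo",
                /* ask for asynchronous probing explicitly */
                .probe_type     = PROBE_PREFER_ASYNCHRONOUS,
        },
};
module_platform_driver(foo_driver);

The same behaviour can be forced at boot for an unmodified driver with driver_async_probe=foo on the command line.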
+ * + * We only take the device lock here in order to guarantee + * that the dev->driver and async_driver fields are protected + */ + dev_dbg(dev, "probing driver %s asynchronously\n", drv->name); + device_lock(dev); + if (!dev->driver) { + get_device(dev); + dev->p->async_driver = drv; + async_schedule_dev(__driver_attach_async_helper, dev); + } + device_unlock(dev); + return 0; + } + + device_driver_attach(drv, dev); return 0; } @@ -932,15 +1050,11 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv = dev->driver; if (drv) { while (device_links_busy(dev)) { - device_unlock(dev); - if (parent && dev->bus->need_parent_lock) - device_unlock(parent); + __device_driver_unlock(dev, parent); device_links_unbind_consumers(dev); - if (parent && dev->bus->need_parent_lock) - device_lock(parent); - device_lock(dev); + __device_driver_lock(dev, parent); /* * A concurrent invocation of the same function might * have released the driver successfully while this one @@ -968,9 +1082,9 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); - arch_teardown_dma_ops(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) @@ -993,16 +1107,12 @@ void device_release_driver_internal(struct device *dev, struct device_driver *drv, struct device *parent) { - if (parent && dev->bus->need_parent_lock) - device_lock(parent); + __device_driver_lock(dev, parent); - device_lock(dev); if (!drv || drv == dev->driver) __device_release_driver(dev, parent); - device_unlock(dev); - if (parent && dev->bus->need_parent_lock) - device_unlock(parent); + __device_driver_unlock(dev, parent); } /** @@ -1028,6 +1138,18 @@ void device_release_driver(struct device *dev) EXPORT_SYMBOL_GPL(device_release_driver); /** + * device_driver_detach - detach driver from a specific device + * @dev: device to detach driver from + * + * Detach driver from device. Will acquire both @dev lock and @dev->parent + * lock if needed. + */ +void device_driver_detach(struct device *dev) +{ + device_release_driver_internal(dev, NULL, dev->parent); +} + +/** * driver_detach - detach driver from all devices it controls. * @drv: driver. */ diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c index d427e806cd73..04db9ae235e4 100644 --- a/drivers/base/devcon.c +++ b/drivers/base/devcon.c @@ -7,10 +7,37 @@ */ #include <linux/device.h> +#include <linux/property.h> static DEFINE_MUTEX(devcon_lock); static LIST_HEAD(devcon_list); +typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, + void *data); + +static void * +fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, + void *data, devcon_match_fn_t match) +{ + struct device_connection con = { .id = con_id }; + struct fwnode_handle *ep; + void *ret; + + fwnode_graph_for_each_endpoint(fwnode, ep) { + con.fwnode = fwnode_graph_get_remote_port_parent(ep); + if (!fwnode_device_is_available(con.fwnode)) + continue; + + ret = match(&con, -1, data); + fwnode_handle_put(con.fwnode); + if (ret) { + fwnode_handle_put(ep); + return ret; + } + } + return NULL; +} + /** * device_connection_find_match - Find physical connection to a device * @dev: Device with the connection @@ -23,10 +50,9 @@ static LIST_HEAD(devcon_list); * caller is expecting to be returned. 
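As a usage sketch of the lookup documented here (the connection name is illustrative): a driver resolving its partner device, which after this change may also be described via the fwnode graph, could do the following:

#include <linux/device.h>

static int foo_resolve_partner(struct device *dev, struct device **partner)
{
        struct device *other;

        other = device_connection_find(dev, "foo-connection");
        if (!other)
                return -EPROBE_DEFER;   /* partner not registered yet */

        *partner = other;       /* reference held; drop with put_device() */
        return 0;
}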
*/ void *device_connection_find_match(struct device *dev, const char *con_id, - void *data, - void *(*match)(struct device_connection *con, - int ep, void *data)) + void *data, devcon_match_fn_t match) { + struct fwnode_handle *fwnode = dev_fwnode(dev); const char *devname = dev_name(dev); struct device_connection *con; void *ret = NULL; @@ -35,6 +61,12 @@ void *device_connection_find_match(struct device *dev, const char *con_id, if (!match) return NULL; + if (fwnode) { + ret = fwnode_graph_devcon_match(fwnode, con_id, data, match); + if (ret) + return ret; + } + mutex_lock(&devcon_lock); list_for_each_entry(con, &devcon_list, list) { @@ -75,12 +107,36 @@ static struct bus_type *generic_match_buses[] = { NULL, }; +static int device_fwnode_match(struct device *dev, void *fwnode) +{ + return dev_fwnode(dev) == fwnode; +} + +static void *device_connection_fwnode_match(struct device_connection *con) +{ + struct bus_type *bus; + struct device *dev; + + for (bus = generic_match_buses[0]; bus; bus++) { + dev = bus_find_device(bus, NULL, (void *)con->fwnode, + device_fwnode_match); + if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id))) + return dev; + + put_device(dev); + } + return NULL; +} + /* This tries to find the device from the most common bus types by name. */ static void *generic_match(struct device_connection *con, int ep, void *data) { struct bus_type *bus; struct device *dev; + if (con->fwnode) + return device_connection_fwnode_match(con); + for (bus = generic_match_buses[0]; bus; bus++) { dev = bus_find_device_by_name(bus, NULL, con->endpoint[ep]); if (dev) diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile index a97eeb0be1d8..0b2dfa6259c9 100644 --- a/drivers/base/firmware_loader/Makefile +++ b/drivers/base/firmware_loader/Makefile @@ -1,7 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for the Linux firmware loader -obj-y := fallback_table.o +obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o obj-$(CONFIG_FW_LOADER) += firmware_class.o firmware_class-objs := main.o firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o + +obj-y += builtin/ diff --git a/drivers/base/firmware_loader/builtin/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore new file mode 100644 index 000000000000..9c8bdb9fdcc3 --- /dev/null +++ b/drivers/base/firmware_loader/builtin/.gitignore @@ -0,0 +1 @@ +*.gen.S diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile new file mode 100644 index 000000000000..37e5ae387400 --- /dev/null +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a +# leading /, it's relative to $(srctree). 
+fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR)) +fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) + +obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE))) + +FWNAME = $(patsubst $(obj)/%.gen.S,%,$@) +FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))) +ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long) +ASM_ALIGN = $(if $(CONFIG_64BIT),3,2) +PROGBITS = $(if $(CONFIG_ARM),%,@)progbits + +filechk_fwbin = \ + echo "/* Generated by $(src)/Makefile */" ;\ + echo " .section .rodata" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo "_fw_$(FWSTR)_bin:" ;\ + echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ + echo "_fw_end:" ;\ + echo " .section .rodata.str,\"aMS\",$(PROGBITS),1" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo "_fw_$(FWSTR)_name:" ;\ + echo " .string \"$(FWNAME)\"" ;\ + echo " .section .builtin_fw,\"a\",$(PROGBITS)" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo " $(ASM_WORD) _fw_$(FWSTR)_name" ;\ + echo " $(ASM_WORD) _fw_$(FWSTR)_bin" ;\ + echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin" + +$(obj)/%.gen.S: FORCE + $(call filechk,fwbin) + +# The .o files depend on the binaries directly; the .S files don't. +$(addprefix $(obj)/, $(obj-y)): $(obj)/%.gen.o: $(fwdir)/% + +targets := $(patsubst $(obj)/%,%, \ + $(shell find $(obj) -name \*.gen.S 2>/dev/null)) diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c index 7428659d8df9..776dd69cf5be 100644 --- a/drivers/base/firmware_loader/fallback_table.c +++ b/drivers/base/firmware_loader/fallback_table.c @@ -16,9 +16,6 @@ * firmware fallback configuration table */ -/* Module or buit-in */ -#ifdef CONFIG_FW_LOADER_USER_HELPER - static unsigned int zero; static unsigned int one = 1; @@ -51,5 +48,3 @@ struct ctl_table firmware_config_table[] = { { } }; EXPORT_SYMBOL_GPL(firmware_config_table); - -#endif diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 8e9213b36e31..7eaaf5ee5ba6 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -328,12 +328,12 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv) rc = kernel_read_file_from_path(path, &fw_priv->data, &size, msize, id); if (rc) { - if (rc == -ENOENT) - dev_dbg(device, "loading %s failed with error %d\n", - path, rc); - else + if (rc != -ENOENT) dev_warn(device, "loading %s failed with error %d\n", path, rc); + else + dev_dbg(device, "loading %s failed for no such file or directory.\n", + path); continue; } dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 1c958eb33ef4..4e45ac21d672 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -127,7 +127,20 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); } - return r ? r->start : -ENXIO; + if (r) + return r->start; + + /* + * For the index 0 interrupt, allow falling back to GpioInt + * resources. While a device could have both Interrupt and GpioInt + * resources, making this fallback ambiguous, in many common cases + * the device will only expose one IRQ, and this fallback + * allows a common code path across either kind of resource. 
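With this GpioInt fallback in platform_get_irq(), an ACPI-enumerated device that only describes a GpioInt resource keeps working with the usual request pattern. A brief sketch; the bar_* names and the trivial handler are placeholders:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t bar_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int bar_probe(struct platform_device *pdev)
{
        int irq;

        irq = platform_get_irq(pdev, 0);        /* Interrupt or, now, GpioInt */
        if (irq < 0)
                return irq;

        return devm_request_irq(&pdev->dev, irq, bar_irq_handler, 0,
                                dev_name(&pdev->dev), pdev);
}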
+ */ + if (num == 0 && has_acpi_companion(&dev->dev)) + return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); + + return -ENXIO; #endif } EXPORT_SYMBOL_GPL(platform_get_irq); @@ -508,10 +521,12 @@ struct platform_device *platform_device_register_full( pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) - goto err_alloc; + return ERR_PTR(-ENOMEM); pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; + pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); + pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { /* @@ -553,8 +568,6 @@ struct platform_device *platform_device_register_full( err: ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); - -err_alloc: platform_device_put(pdev); return ERR_PTR(ret); } diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 5a42ae4078c2..365ad751ce0f 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) if (IS_ERR(ce->clk)) { ce->status = PCE_STATUS_ERROR; } else { - clk_prepare(ce->clk); - ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", - ce->clk, ce->con_id); + if (clk_prepare(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + dev_err(dev, "clk_prepare() failed\n"); + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, + "Clock %pC con_id %s managed by runtime PM.\n", + ce->clk, ce->con_id); + } } } diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index b413951c6abc..22aedb28aad7 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id); * For a detailed function description, see dev_pm_domain_attach_by_id(). */ struct device *dev_pm_domain_attach_by_name(struct device *dev, - char *name) + const char *name) { if (dev->pm_domain) return ERR_PTR(-EEXIST); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 500de1dee967..2c334c01fc43 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); * power-domain-names DT property. For further description see * genpd_dev_pm_attach_by_id(). */ -struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name) +struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) { int index; @@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void) genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); - if (!genpd_debugfs_dir) - return -ENOMEM; - - d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - genpd_debugfs_dir, NULL, &summary_fops); - if (!d) - return -ENOMEM; + debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, + NULL, &summary_fops); list_for_each_entry(genpd, &gpd_list, gpd_list_node) { d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); - if (!d) - return -ENOMEM; debugfs_create_file("current_state", 0444, d, genpd, &status_fops); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0992e67e862b..5a8149829ab3 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -124,6 +124,10 @@ void device_pm_unlock(void) */ void device_pm_add(struct device *dev) { + /* Skip PM setup/initialization. */ + if (device_pm_not_required(dev)) + return; + pr_debug("PM: Adding info for %s:%s\n", dev->bus ? 
dev->bus->name : "No Bus", dev_name(dev)); device_pm_check_callbacks(dev); @@ -142,6 +146,9 @@ void device_pm_add(struct device *dev) */ void device_pm_remove(struct device *dev) { + if (device_pm_not_required(dev)) + return; + pr_debug("PM: Removing info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); complete_all(&dev->power.completion); @@ -727,7 +734,7 @@ void dpm_noirq_resume_devices(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume_noirq, dev); + async_schedule_dev(async_resume_noirq, dev); } } @@ -884,7 +891,7 @@ void dpm_resume_early(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume_early, dev); + async_schedule_dev(async_resume_early, dev); } } @@ -1048,7 +1055,7 @@ void dpm_resume(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume, dev); + async_schedule_dev(async_resume, dev); } } @@ -1368,7 +1375,7 @@ static int device_suspend_noirq(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend_noirq, dev); + async_schedule_dev(async_suspend_noirq, dev); return 0; } return __device_suspend_noirq(dev, pm_transition, false); @@ -1571,7 +1578,7 @@ static int device_suspend_late(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend_late, dev); + async_schedule_dev(async_suspend_late, dev); return 0; } @@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_status_suspended(dev)) + if (pm_runtime_status_suspended(dev)) { + pm_dev_dbg(dev, state, "direct-complete "); goto Complete; + } pm_runtime_enable(dev); } @@ -1835,7 +1844,7 @@ static int device_suspend(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend, dev); + async_schedule_dev(async_suspend, dev); return 0; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 457be03b744d..a80dbf08a99c 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags); */ void update_pm_runtime_accounting(struct device *dev) { - unsigned long now = jiffies; - unsigned long delta; + u64 now, last, delta; - delta = now - dev->power.accounting_timestamp; + if (dev->power.disable_depth > 0) + return; + + last = dev->power.accounting_timestamp; + now = ktime_get_mono_fast_ns(); dev->power.accounting_timestamp = now; - if (dev->power.disable_depth > 0) + /* + * Because ktime_get_mono_fast_ns() is not monotonic during + * timekeeping updates, ensure that 'now' is after the last saved + * timesptamp. 
+ */ + if (now < last) return; + delta = now - last; + if (dev->power.runtime_status == RPM_SUSPENDED) - dev->power.suspended_jiffies += delta; + dev->power.suspended_time += delta; else - dev->power.active_jiffies += delta; + dev->power.active_time += delta; } static void __update_runtime_status(struct device *dev, enum rpm_status status) @@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) dev->power.runtime_status = status; } +u64 pm_runtime_suspended_time(struct device *dev) +{ + u64 time; + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + update_pm_runtime_accounting(dev); + time = dev->power.suspended_time; + + spin_unlock_irqrestore(&dev->power.lock, flags); + + return time; +} +EXPORT_SYMBOL_GPL(pm_runtime_suspended_time); + /** * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. * @dev: Device to handle. @@ -95,7 +121,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { - hrtimer_cancel(&dev->power.suspend_timer); + hrtimer_try_to_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } @@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev) u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; - u64 last_busy, expires = 0; - u64 now = ktime_to_ns(ktime_get()); + u64 expires; if (!dev->power.use_autosuspend) - goto out; + return 0; autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); if (autosuspend_delay < 0) - goto out; - - last_busy = READ_ONCE(dev->power.last_busy); + return 0; - expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC; - if (expires <= now) - expires = 0; /* Already expired. */ + expires = READ_ONCE(dev->power.last_busy); + expires += (u64)autosuspend_delay * NSEC_PER_MSEC; + if (expires > ktime_get_mono_fast_ns()) + return expires; /* Expires in the future */ - out: - return expires; + return 0; } EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); @@ -259,11 +282,8 @@ static int rpm_get_suppliers(struct device *dev) list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { int retval; - if (!(link->flags & DL_FLAG_PM_RUNTIME)) - continue; - - if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND || - link->rpm_active) + if (!(link->flags & DL_FLAG_PM_RUNTIME) || + READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) continue; retval = pm_runtime_get_sync(link->supplier); @@ -272,7 +292,7 @@ static int rpm_get_suppliers(struct device *dev) pm_runtime_put_noidle(link->supplier); return retval; } - link->rpm_active = true; + refcount_inc(&link->rpm_active); } return 0; } @@ -281,12 +301,13 @@ static void rpm_put_suppliers(struct device *dev) { struct device_link *link; - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->rpm_active && - READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) { + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { + if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) + continue; + + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put(link->supplier); - link->rpm_active = false; - } + } } /** @@ -909,7 +930,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) * If 'expires' is after the current time, we've been called * too early. 
*/ - if (expires > 0 && expires < ktime_to_ns(ktime_get())) { + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); @@ -928,7 +949,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; - ktime_t expires; + u64 expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); @@ -945,8 +966,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) /* Other scheduled or pending requests need to be canceled. */ pm_runtime_cancel_pending(dev); - expires = ktime_add(ktime_get(), ms_to_ktime(delay)); - dev->power.timer_expires = ktime_to_ns(expires); + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; + dev->power.timer_expires = expires; dev->power.timer_autosuspends = 0; hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); @@ -1091,24 +1112,57 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); * and the device parent's counter of unsuspended children is modified to * reflect the new status. If the new status is RPM_SUSPENDED, an idle * notification request for the parent is submitted. + * + * If @dev has any suppliers (as reflected by device links to them), and @status + * is RPM_ACTIVE, they will be activated upfront and if the activation of one + * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead + * of the @status value) and the suppliers will be deactivated on exit. The + * error returned by the failing supplier activation will be returned in that + * case. */ int __pm_runtime_set_status(struct device *dev, unsigned int status) { struct device *parent = dev->parent; - unsigned long flags; bool notify_parent = false; int error = 0; if (status != RPM_ACTIVE && status != RPM_SUSPENDED) return -EINVAL; - spin_lock_irqsave(&dev->power.lock, flags); + spin_lock_irq(&dev->power.lock); - if (!dev->power.runtime_error && !dev->power.disable_depth) { + /* + * Prevent PM-runtime from being enabled for the device or return an + * error if it is enabled already and working. + */ + if (dev->power.runtime_error || dev->power.disable_depth) + dev->power.disable_depth++; + else error = -EAGAIN; - goto out; + + spin_unlock_irq(&dev->power.lock); + + if (error) + return error; + + /* + * If the new status is RPM_ACTIVE, the suppliers can be activated + * upfront regardless of the current status, because next time + * rpm_put_suppliers() runs, the rpm_active refcounts of the links + * involved will be dropped down to one anyway. 
+ */ + if (status == RPM_ACTIVE) { + int idx = device_links_read_lock(); + + error = rpm_get_suppliers(dev); + if (error) + status = RPM_SUSPENDED; + + device_links_read_unlock(idx); } + spin_lock_irq(&dev->power.lock); + if (dev->power.runtime_status == status || !parent) goto out_set; @@ -1136,19 +1190,33 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) spin_unlock(&parent->power.lock); - if (error) + if (error) { + status = RPM_SUSPENDED; goto out; + } } out_set: __update_runtime_status(dev, status); - dev->power.runtime_error = 0; + if (!error) + dev->power.runtime_error = 0; + out: - spin_unlock_irqrestore(&dev->power.lock, flags); + spin_unlock_irq(&dev->power.lock); if (notify_parent) pm_request_idle(parent); + if (status == RPM_SUSPENDED) { + int idx = device_links_read_lock(); + + rpm_put_suppliers(dev); + + device_links_read_unlock(idx); + } + + pm_runtime_enable(dev); + return error; } EXPORT_SYMBOL_GPL(__pm_runtime_set_status); @@ -1276,6 +1344,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) pm_runtime_put_noidle(dev); } + /* Update time accounting before disabling PM-runtime. */ + update_pm_runtime_accounting(dev); + if (!dev->power.disable_depth++) __pm_runtime_barrier(dev); @@ -1294,10 +1365,15 @@ void pm_runtime_enable(struct device *dev) spin_lock_irqsave(&dev->power.lock, flags); - if (dev->power.disable_depth > 0) + if (dev->power.disable_depth > 0) { dev->power.disable_depth--; - else + + /* About to enable runtime pm, set accounting_timestamp to now */ + if (!dev->power.disable_depth) + dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); + } else { dev_warn(dev, "Unbalanced %s!\n", __func__); + } WARN(!dev->power.disable_depth && dev->power.runtime_status == RPM_SUSPENDED && @@ -1494,7 +1570,6 @@ void pm_runtime_init(struct device *dev) dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; - dev->power.accounting_timestamp = jiffies; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; @@ -1539,7 +1614,7 @@ void pm_runtime_remove(struct device *dev) * * Check links from this device to any consumers and if any of them have active * runtime PM references to the device, drop the usage counter of the device - * (once per link). + * (as many times as needed). * * Links with the DL_FLAG_STATELESS flag set are ignored. 
* @@ -1561,10 +1636,8 @@ void pm_runtime_clean_up_links(struct device *dev) if (link->flags & DL_FLAG_STATELESS) continue; - if (link->rpm_active) { + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put_noidle(dev); - link->rpm_active = false; - } } device_links_read_unlock(idx); @@ -1582,8 +1655,11 @@ void pm_runtime_get_suppliers(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) + if (link->flags & DL_FLAG_PM_RUNTIME) { + link->supplier_preactivated = true; + refcount_inc(&link->rpm_active); pm_runtime_get_sync(link->supplier); + } device_links_read_unlock(idx); } @@ -1600,8 +1676,11 @@ void pm_runtime_put_suppliers(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_put(link->supplier); + if (link->supplier_preactivated) { + link->supplier_preactivated = false; + if (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + } device_links_read_unlock(idx); } @@ -1615,8 +1694,6 @@ void pm_runtime_new_link(struct device *dev) void pm_runtime_drop_link(struct device *dev) { - rpm_put_suppliers(dev); - spin_lock_irq(&dev->power.lock); WARN_ON(dev->power.links_count == 0); dev->power.links_count--; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d713738ce796..c6bf76124184 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; + u64 tmp; spin_lock_irq(&dev->power.lock); update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); + tmp = dev->power.active_time; + do_div(tmp, NSEC_PER_MSEC); + ret = sprintf(buf, "%llu\n", tmp); spin_unlock_irq(&dev->power.lock); return ret; } @@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; + u64 tmp; spin_lock_irq(&dev->power.lock); update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", - jiffies_to_msecs(dev->power.suspended_jiffies)); + tmp = dev->power.suspended_time; + do_div(tmp, NSEC_PER_MSEC); + ret = sprintf(buf, "%llu\n", tmp); spin_unlock_irq(&dev->power.lock); return ret; } @@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev) { int rc; + /* No need to create PM sysfs if explicitly disabled. */ + if (device_pm_not_required(dev)) + return 0; + rc = sysfs_create_group(&dev->kobj, &pm_attr_group); if (rc) return rc; @@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + if (device_pm_not_required(dev)) + return; sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 5fa1898755a3..f1fee72ed970 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** - * pm_wakeup_event - Notify the PM core of a wakeup event. + * pm_wakeup_dev_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). 
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle. diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 2e8f0144f9ab..9cbb4b0cd01b 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -33,7 +33,7 @@ struct regcache_rbtree_node { unsigned int blklen; /* the actual rbtree node holding this block */ struct rb_node node; -} __attribute__ ((packed)); +}; struct regcache_rbtree_ctx { struct rb_root root; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 330c1f7e9665..5059748afd4c 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -35,6 +35,7 @@ struct regmap_irq_chip_data { int wake_count; void *status_reg_buf; + unsigned int *main_status_buf; unsigned int *status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; @@ -329,6 +330,33 @@ static const struct irq_chip regmap_irq_chip = { .irq_set_wake = regmap_irq_set_wake, }; +static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, + unsigned int b) +{ + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + struct regmap_irq_sub_irq_map *subreg; + int i, ret = 0; + + if (!chip->sub_reg_offsets) { + /* Assume linear mapping */ + ret = regmap_read(map, chip->status_base + + (b * map->reg_stride * data->irq_reg_stride), + &data->status_buf[b]); + } else { + subreg = &chip->sub_reg_offsets[b]; + for (i = 0; i < subreg->num_regs; i++) { + unsigned int offset = subreg->offset[i]; + + ret = regmap_read(map, chip->status_base + offset, + &data->status_buf[offset]); + if (ret) + break; + } + } + return ret; +} + static irqreturn_t regmap_irq_thread(int irq, void *d) { struct regmap_irq_chip_data *data = d; @@ -352,11 +380,65 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } /* - * Read in the statuses, using a single bulk read if possible - * in order to reduce the I/O overheads. + * Read only registers with active IRQs if the chip has 'main status + * register'. Else read in the statuses, using a single bulk read if + * possible in order to reduce the I/O overheads. */ - if (!map->use_single_read && map->reg_stride == 1 && - data->irq_reg_stride == 1) { + + if (chip->num_main_regs) { + unsigned int max_main_bits; + unsigned long size; + + size = chip->num_regs * sizeof(unsigned int); + + max_main_bits = (chip->num_main_status_bits) ? + chip->num_main_status_bits : chip->num_regs; + /* Clear the status buf as we don't read all status regs */ + memset(data->status_buf, 0, size); + + /* We could support bulk read for main status registers + * but I don't expect to see devices with really many main + * status registers so let's only support single reads for the + * sake of simplicity. 
and add bulk reads only if needed + */ + for (i = 0; i < chip->num_main_regs; i++) { + ret = regmap_read(map, chip->main_status + + (i * map->reg_stride + * data->irq_reg_stride), + &data->main_status_buf[i]); + if (ret) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + goto exit; + } + } + + /* Read sub registers with active IRQs */ + for (i = 0; i < chip->num_main_regs; i++) { + unsigned int b; + const unsigned long mreg = data->main_status_buf[i]; + + for_each_set_bit(b, &mreg, map->format.val_bytes * 8) { + if (i * map->format.val_bytes * 8 + b > + max_main_bits) + break; + ret = read_sub_irq_data(data, b); + + if (ret != 0) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + if (chip->runtime_pm) + pm_runtime_put(map->dev); + goto exit; + } + } + + } + } else if (!map->use_single_read && map->reg_stride == 1 && + data->irq_reg_stride == 1) { + u8 *buf8 = data->status_reg_buf; u16 *buf16 = data->status_reg_buf; u32 *buf32 = data->status_reg_buf; @@ -521,6 +603,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; + if (chip->num_main_regs) { + d->main_status_buf = kcalloc(chip->num_main_regs, + sizeof(unsigned int), + GFP_KERNEL); + + if (!d->main_status_buf) + goto err_alloc; + } + d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->status_buf) diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 89ad8dee6ad5..1fad9291f6aa 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -499,6 +499,28 @@ software_node_get_next_child(const struct fwnode_handle *fwnode, return &c->fwnode; } +static struct fwnode_handle * +software_node_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) +{ + struct software_node *swnode = to_software_node(fwnode); + const struct property_entry *prop; + struct software_node *child; + + if (!swnode || list_empty(&swnode->children)) + return NULL; + + list_for_each_entry(child, &swnode->children, entry) { + prop = property_entry_get(child->properties, "name"); + if (!prop) + continue; + if (!strcmp(childname, prop->value.str)) { + kobject_get(&child->kobj); + return &child->fwnode; + } + } + return NULL; +} static const struct fwnode_operations software_node_ops = { .get = software_node_get, @@ -508,6 +530,7 @@ static const struct fwnode_operations software_node_ops = { .property_read_string_array = software_node_read_string_array, .get_parent = software_node_get_parent, .get_next_child_node = software_node_get_next_child, + .get_named_child_node = software_node_get_named_child_node, }; /* -------------------------------------------------------------------------- */ diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c index e7f145d662f0..f4b1d8e54daf 100644 --- a/drivers/base/test/test_async_driver_probe.c +++ b/drivers/base/test/test_async_driver_probe.c @@ -11,16 +11,47 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/time.h> +#include <linux/numa.h> +#include <linux/nodemask.h> +#include <linux/topology.h> #define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */ #define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2) +static atomic_t warnings, errors, timeout, async_completed; + static int test_probe(struct platform_device *pdev) { - dev_info(&pdev->dev, "sleeping for %d msecs in probe\n", - TEST_PROBE_DELAY); - msleep(TEST_PROBE_DELAY); - dev_info(&pdev->dev, "done sleeping\n"); + struct device *dev = &pdev->dev; + + /* + * Determine if 
we have hit the "timeout" limit for the test; if we + * have, report it as an error, otherwise we will sleep for the + * required amount of time and then report completion. + */ + if (atomic_read(&timeout)) { + dev_err(dev, "async probe took too long\n"); + atomic_inc(&errors); + } else { + dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n", + TEST_PROBE_DELAY); + msleep(TEST_PROBE_DELAY); + dev_dbg(&pdev->dev, "done sleeping\n"); + } + + /* + * Report NUMA mismatch if device node is set and we are not + * performing an async init on that node. + */ + if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) { + if (dev_to_node(dev) != numa_node_id()) { + dev_warn(dev, "NUMA node mismatch %d != %d\n", + dev_to_node(dev), numa_node_id()); + atomic_inc(&warnings); + } + + atomic_inc(&async_completed); + } return 0; } @@ -41,31 +72,64 @@ static struct platform_driver sync_driver = { .probe = test_probe, }; -static struct platform_device *async_dev_1, *async_dev_2; -static struct platform_device *sync_dev_1; +static struct platform_device *async_dev[NR_CPUS * 2]; +static struct platform_device *sync_dev[2]; + +static struct platform_device * +test_platform_device_register_node(char *name, int id, int nid) +{ + struct platform_device *pdev; + int ret; + + pdev = platform_device_alloc(name, id); + if (!pdev) + return NULL; + + if (nid != NUMA_NO_NODE) + set_dev_node(&pdev->dev, nid); + + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); + return ERR_PTR(ret); + } + + return pdev; + +} static int __init test_async_probe_init(void) { - ktime_t calltime, delta; + struct platform_device **pdev = NULL; + int async_id = 0, sync_id = 0; unsigned long long duration; - int error; + ktime_t calltime, delta; + int err, nid, cpu; + + pr_info("registering first set of asynchronous devices...\n"); - pr_info("registering first asynchronous device...\n"); + for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &async_dev[async_id]; + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_devs; + } - async_dev_1 = platform_device_register_simple("test_async_driver", 1, - NULL, 0); - if (IS_ERR(async_dev_1)) { - error = PTR_ERR(async_dev_1); - pr_err("failed to create async_dev_1: %d\n", error); - return error; + async_id++; } pr_info("registering asynchronous driver...\n"); calltime = ktime_get(); - error = platform_driver_register(&async_driver); - if (error) { - pr_err("Failed to register async_driver: %d\n", error); - goto err_unregister_async_dev_1; + err = platform_driver_register(&async_driver); + if (err) { + pr_err("Failed to register async_driver: %d\n", err); + goto err_unregister_async_devs; } delta = ktime_sub(ktime_get(), calltime); @@ -73,86 +137,163 @@ static int __init test_async_probe_init(void) pr_info("registration took %lld msecs\n", duration); if (duration > TEST_PROBE_THRESHOLD) { pr_err("test failed: probe took too long\n"); - error = -ETIMEDOUT; + err = -ETIMEDOUT; goto err_unregister_async_driver; } - pr_info("registering second asynchronous device...\n"); + pr_info("registering second set of asynchronous devices...\n"); calltime = ktime_get(); - async_dev_2 = platform_device_register_simple("test_async_driver", 2, - NULL, 0); - if (IS_ERR(async_dev_2)) { - error = PTR_ERR(async_dev_2); - pr_err("failed to create async_dev_2: %d\n", error); - goto err_unregister_async_driver; + 
for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &async_dev[async_id]; + + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_driver; + } + + async_id++; } delta = ktime_sub(ktime_get(), calltime); duration = (unsigned long long) ktime_to_ms(delta); - pr_info("registration took %lld msecs\n", duration); + dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); if (duration > TEST_PROBE_THRESHOLD) { - pr_err("test failed: probe took too long\n"); - error = -ETIMEDOUT; - goto err_unregister_async_dev_2; + dev_err(&(*pdev)->dev, + "test failed: probe took too long\n"); + err = -ETIMEDOUT; + goto err_unregister_async_driver; } - pr_info("registering synchronous driver...\n"); - error = platform_driver_register(&sync_driver); - if (error) { - pr_err("Failed to register async_driver: %d\n", error); - goto err_unregister_async_dev_2; + pr_info("registering first synchronous device...\n"); + nid = cpu_to_node(cpu); + pdev = &sync_dev[sync_id]; + + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_async_driver; } - pr_info("registering synchronous device...\n"); + sync_id++; + + pr_info("registering synchronous driver...\n"); calltime = ktime_get(); - sync_dev_1 = platform_device_register_simple("test_sync_driver", 1, - NULL, 0); - if (IS_ERR(sync_dev_1)) { - error = PTR_ERR(sync_dev_1); - pr_err("failed to create sync_dev_1: %d\n", error); - goto err_unregister_sync_driver; + err = platform_driver_register(&sync_driver); + if (err) { + pr_err("Failed to register sync_driver: %d\n", err); + goto err_unregister_sync_devs; } delta = ktime_sub(ktime_get(), calltime); duration = (unsigned long long) ktime_to_ms(delta); pr_info("registration took %lld msecs\n", duration); if (duration < TEST_PROBE_THRESHOLD) { - pr_err("test failed: probe was too quick\n"); - error = -ETIMEDOUT; - goto err_unregister_sync_dev_1; + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; } - pr_info("completed successfully"); + pr_info("registering second synchronous device...\n"); + pdev = &sync_dev[sync_id]; + calltime = ktime_get(); - return 0; + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_sync_driver; + } -err_unregister_sync_dev_1: - platform_device_unregister(sync_dev_1); + sync_id++; -err_unregister_sync_driver: - platform_driver_unregister(&sync_driver); + delta = ktime_sub(ktime_get(), calltime); + duration = (unsigned long long) ktime_to_ms(delta); + dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); + if (duration < TEST_PROBE_THRESHOLD) { + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; + } -err_unregister_async_dev_2: - platform_device_unregister(async_dev_2); + /* + * The async events should have completed while we were taking care + * of the synchronous events. 
We will now terminate any outstanding + * asynchronous probe calls remaining by forcing timeout and remove + * the driver before we return, which should force the flush of the + * pending asynchronous probe calls. + * + * Otherwise if they completed without errors or warnings then + * report successful completion. + */ + if (atomic_read(&async_completed) != async_id) { + pr_err("async events still pending, forcing timeout\n"); + atomic_inc(&timeout); + err = -ETIMEDOUT; + } else if (!atomic_read(&errors) && !atomic_read(&warnings)) { + pr_info("completed successfully\n"); + return 0; + } +err_unregister_sync_driver: + platform_driver_unregister(&sync_driver); +err_unregister_sync_devs: + while (sync_id--) + platform_device_unregister(sync_dev[sync_id]); err_unregister_async_driver: platform_driver_unregister(&async_driver); +err_unregister_async_devs: + while (async_id--) + platform_device_unregister(async_dev[async_id]); + + /* + * If err is already set then count that as an additional error for + * the test. Otherwise we will report an invalid argument error and + * not count that as we should have reached here as a result of + * errors or warnings being reported by the probe routine. + */ + if (err) + atomic_inc(&errors); + else + err = -EINVAL; -err_unregister_async_dev_1: - platform_device_unregister(async_dev_1); + pr_err("Test failed with %d errors and %d warnings\n", + atomic_read(&errors), atomic_read(&warnings)); - return error; + return err; } module_init(test_async_probe_init); static void __exit test_async_probe_exit(void) { + int id = 2; + platform_driver_unregister(&async_driver); platform_driver_unregister(&sync_driver); - platform_device_unregister(async_dev_1); - platform_device_unregister(async_dev_2); - platform_device_unregister(sync_dev_1); + + while (id--) + platform_device_unregister(sync_dev[id]); + + id = NR_CPUS * 2; + while (id--) + platform_device_unregister(async_dev[id]); } module_exit(test_async_probe_exit);
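The runtime.c hunks above move PM-runtime time accounting from jiffies to ktime_get_mono_fast_ns() and export a new pm_runtime_suspended_time() accessor. A minimal usage sketch follows, assuming the accessor is declared in <linux/pm_runtime.h> (that header is outside this drivers/base diff); the foo_suspended_ms() wrapper is hypothetical:

#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/pm_runtime.h>

/* Report how long @dev has spent runtime-suspended, in milliseconds. */
static u64 foo_suspended_ms(struct device *dev)
{
	/* The accounting is now kept in nanoseconds, so scale it down the
	 * same way the reworked runtime_suspended_time_show() in sysfs.c
	 * does with do_div(). */
	return div_u64(pm_runtime_suspended_time(dev), NSEC_PER_MSEC);
}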
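The regmap-irq changes teach the interrupt thread to consult a top-level "main status" register and read only the sub-status registers whose bits are set (see read_sub_irq_data() above). Below is a sketch of how a chip with two such sub-status banks might be described; the register addresses and foo_* names are invented, and the header-side definitions of struct regmap_irq_sub_irq_map and the main_status/num_main_regs/num_main_status_bits/sub_reg_offsets fields are assumed from the regmap API rather than shown in this drivers/base diff:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static unsigned int foo_bank0_offsets[] = { 0 };	/* main bit 0 -> status_base + 0 */
static unsigned int foo_bank1_offsets[] = { 1 };	/* main bit 1 -> status_base + 1 */

static struct regmap_irq_sub_irq_map foo_sub_irq_map[] = {
	{ .num_regs = ARRAY_SIZE(foo_bank0_offsets), .offset = foo_bank0_offsets },
	{ .num_regs = ARRAY_SIZE(foo_bank1_offsets), .offset = foo_bank1_offsets },
};

static const struct regmap_irq foo_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),	/* IRQ 0: sub-status reg 0, bit 0 */
	REGMAP_IRQ_REG(1, 1, BIT(0)),	/* IRQ 1: sub-status reg 1, bit 0 */
};

static struct regmap_irq_chip foo_irq_chip = {
	.name = "foo",
	.main_status = 0x00,			/* hypothetical top-level status register */
	.num_main_regs = ARRAY_SIZE(foo_sub_irq_map),
	.num_main_status_bits = 2,		/* only bits 0..1 are wired up */
	.sub_reg_offsets = foo_sub_irq_map,
	.status_base = 0x10,
	.mask_base = 0x20,
	.num_regs = 2,
	.irqs = foo_irqs,
	.num_irqs = ARRAY_SIZE(foo_irqs),
};

Registration through regmap_add_irq_chip() is unchanged; if sub_reg_offsets is left NULL while num_main_regs is set, the handler falls back to the linear status_base + index mapping, as the first branch of read_sub_irq_data() shows.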
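With the .get_named_child_node hook added to software_node_ops, fwnode_get_named_child_node() and device_get_named_child_node() can now resolve a software-node child by the value of its "name" string property (the lookup above compares childname against that property). A brief sketch, assuming the child was registered with a PROPERTY_ENTRY_STRING("name", "port0") entry and that the device's fwnode is the software node; the "port0" child and "reg" property are invented for illustration:

#include <linux/device.h>
#include <linux/property.h>

static int foo_probe_port(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg = 0;

	/* Resolved via software_node_get_named_child_node() when the
	 * device's fwnode is a software node. */
	child = device_get_named_child_node(dev, "port0");
	if (!child)
		return -ENODEV;

	fwnode_property_read_u32(child, "reg", &reg);
	dev_info(dev, "port0 reg = %u\n", reg);

	fwnode_handle_put(child);
	return 0;
}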