Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                          9
-rw-r--r--  drivers/base/Makefile                         2
-rw-r--r--  drivers/base/base.h                          15
-rw-r--r--  drivers/base/cacheinfo.c                    200
-rw-r--r--  drivers/base/class.c                         15
-rw-r--r--  drivers/base/core.c                         578
-rw-r--r--  drivers/base/dd.c                            79
-rw-r--r--  drivers/base/devcoredump.c                   10
-rw-r--r--  drivers/base/devres.c                        66
-rw-r--r--  drivers/base/dma-mapping.c                    4
-rw-r--r--  drivers/base/firmware_class.c               178
-rw-r--r--  drivers/base/memory.c                         4
-rw-r--r--  drivers/base/power/domain.c                 364
-rw-r--r--  drivers/base/power/main.c                    91
-rw-r--r--  drivers/base/power/opp/core.c               521
-rw-r--r--  drivers/base/power/opp/debugfs.c             52
-rw-r--r--  drivers/base/power/opp/of.c                 111
-rw-r--r--  drivers/base/power/opp/opp.h                 23
-rw-r--r--  drivers/base/power/power.h                   29
-rw-r--r--  drivers/base/power/qos.c                      6
-rw-r--r--  drivers/base/power/runtime.c                236
-rw-r--r--  drivers/base/power/sysfs.c                    6
-rw-r--r--  drivers/base/power/trace.c                   27
-rw-r--r--  drivers/base/power/wakeirq.c                 76
-rw-r--r--  drivers/base/power/wakeup.c                  10
-rw-r--r--  drivers/base/regmap/regcache-lzo.c            8
-rw-r--r--  drivers/base/soc.c                           79
-rw-r--r--  drivers/base/test/Kconfig                     9
-rw-r--r--  drivers/base/test/Makefile                    1
-rw-r--r--  drivers/base/test/test_async_driver_probe.c 169
-rw-r--r--  drivers/base/topology.c                      42
31 files changed, 2552 insertions, 468 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d02e7c0f5bfd..d718ae4b907a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -224,6 +224,8 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+source "drivers/base/test/Kconfig"
+
config SYS_HYPERVISOR
bool
default n
@@ -237,6 +239,7 @@ config GENERIC_CPU_AUTOPROBE
config SOC_BUS
bool
+ select GLOB
source "drivers/base/regmap/Kconfig"
@@ -250,11 +253,11 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
-config FENCE_TRACE
- bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+ bool "Enable verbose DMA_FENCE_TRACE messages"
depends on DMA_SHARED_BUFFER
help
- Enable the FENCE_TRACE printks. This will add extra
+ Enable the DMA_FENCE_TRACE printks. This will add extra
spam to the console log, but will make it easier to diagnose
lockup related problems for dma-buffers shared across multiple
devices.
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 2609ba20b396..f2816f6ff76a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,5 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-y += test/
+
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e05db388bd1c..ada9dce34e6d 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
+extern struct device_attribute dev_attr_deferred_probe;
+
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
@@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
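For orientation, the call order these hooks end up with (pieced together from the dd.c changes later in this patch; a rough outline, not a literal excerpt):

/*
 * probe (really_probe):   device_links_check_suppliers() -> bind ->
 *                         device_links_driver_bound(), or on failure
 *                         device_links_no_driver()
 * unbind (__device_release_driver): while (device_links_busy())
 *                         device_links_unbind_consumers(); then
 *                         device_links_driver_cleanup()
 */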
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index e9fd32e91668..eb3af2739537 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -16,6 +16,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
return sib_leaf->of_node == this_leaf->of_node;
}
+
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+ const char *size_prop;
+ const char *line_size_props[2];
+ const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+ {
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ }, {
+ .size_prop = "i-cache-size",
+ .line_size_props = { "i-cache-line-size",
+ "i-cache-block-size", },
+ .nr_sets_prop = "i-cache-sets",
+ }, {
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+ if (type == CACHE_TYPE_UNIFIED)
+ return 0;
+ return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *cache_size;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].size_prop;
+
+ cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (cache_size)
+ this_leaf->size = of_read_number(cache_size, 1);
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf)
+{
+ const __be32 *line_size;
+ int i, lim, ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+ for (i = 0; i < lim; i++) {
+ const char *propname;
+
+ propname = cache_type_info[ct_idx].line_size_props[i];
+ line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (line_size)
+ break;
+ }
+
+ if (line_size)
+ this_leaf->coherency_line_size = of_read_number(line_size, 1);
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *nr_sets;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].nr_sets_prop;
+
+ nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ if (nr_sets)
+ this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+ unsigned int line_size = this_leaf->coherency_line_size;
+ unsigned int nr_sets = this_leaf->number_of_sets;
+ unsigned int size = this_leaf->size;
+
+ /*
+ * If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
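/*
 * Worked example (illustrative numbers, not from a real device tree): a
 * 32 KiB cache with 64-byte lines and 128 sets gives
 * (32768 / 128) / 64 = 4 ways of associativity.
 */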
+
+static void cache_of_override_properties(unsigned int cpu)
+{
+ int index;
+ struct cacheinfo *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ cache_size(this_leaf);
+ cache_get_line_size(this_leaf);
+ cache_nr_sets(this_leaf);
+ cache_associativity(this_leaf);
+ }
+}
#else
+static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
@@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
- int ret;
+ int ret = 0;
+
+ if (this_cpu_ci->cpu_map_populated)
+ return 0;
- ret = cache_setup_of_node(cpu);
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ /* No cache property/hierarchy support yet in ACPI */
+ ret = -ENOTSUPP;
if (ret)
return ret;
@@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
+static void cache_override_properties(unsigned int cpu)
+{
+ if (of_have_populated_dt())
+ return cache_of_override_properties(cpu);
+}
+
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
- pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
- cpu);
+ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
+
+ cache_override_properties(cpu);
return 0;
free_ci:
@@ -233,6 +363,7 @@ static ssize_t file_name##_show(struct device *dev, \
return sprintf(buf, "%u\n", this_leaf->object); \
}
+show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
@@ -314,6 +445,7 @@ static ssize_t write_policy_show(struct device *dev,
return n;
}
+static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
@@ -327,6 +459,7 @@ static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
+ &dev_attr_id.attr,
&dev_attr_type.attr,
&dev_attr_level.attr,
&dev_attr_shared_cpu_map.attr,
@@ -350,6 +483,8 @@ cache_default_attrs_is_visible(struct kobject *kobj,
const struct cpumask *mask = &this_leaf->shared_cpu_map;
umode_t mode = attr->mode;
+ if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+ return mode;
if ((attr == &dev_attr_type.attr) && this_leaf->type)
return mode;
if ((attr == &dev_attr_level.attr) && this_leaf->level)
@@ -498,57 +633,30 @@ err:
return rc;
}
-static void cache_remove_dev(unsigned int cpu)
+static int cacheinfo_cpu_online(unsigned int cpu)
{
- if (!cpumask_test_cpu(cpu, &cache_dev_map))
- return;
- cpumask_clear_cpu(cpu, &cache_dev_map);
+ int rc = detect_cache_attributes(cpu);
- cpu_cache_sysfs_exit(cpu);
+ if (rc)
+ return rc;
+ rc = cache_add_dev(cpu);
+ if (rc)
+ free_cache_attributes(cpu);
+ return rc;
}
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
+ if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
+ cpu_cache_sysfs_exit(cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = detect_cache_attributes(cpu);
- if (!rc)
- rc = cache_add_dev(cpu);
- break;
- case CPU_DEAD:
- cache_remove_dev(cpu);
- free_cache_attributes(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ free_cache_attributes(cpu);
+ return 0;
}
static int __init cacheinfo_sysfs_init(void)
{
- int cpu, rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = detect_cache_attributes(cpu);
- if (rc)
- goto out;
- rc = cache_add_dev(cpu);
- if (rc) {
- free_cache_attributes(cpu);
- pr_err("error populating cacheinfo..cpu%d\n", cpu);
- goto out;
- }
- }
- __hotcpu_notifier(cacheinfo_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+ cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
-
device_initcall(cacheinfo_sysfs_init);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 71059e32bebc..a2b2896693d6 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -163,6 +163,18 @@ static void klist_class_dev_put(struct klist_node *n)
put_device(dev);
}
+static int class_add_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
int __class_register(struct class *cls, struct lock_class_key *key)
{
struct subsys_private *cp;
@@ -203,6 +215,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
kfree(cp);
return error;
}
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
error = add_class_attrs(class_get(cls));
class_put(cls);
return error;
@@ -213,6 +227,7 @@ void class_unregister(struct class *cls)
{
pr_debug("device class '%s': unregistering\n", cls->name);
remove_class_attrs(cls);
+ class_remove_groups(cls, cls->class_groups);
kset_unregister(&cls->p->subsys);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ce057a568673..020ea7f05520 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
+/* Device links support. */
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+ mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx)
+{
+ srcu_read_unlock(&device_links_srcu, idx);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+ down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ down_read(&device_links_lock);
+ return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+ up_read(&device_links_lock);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or any device dependent on it (its child or
+ * its consumer etc). Return 1 if that is the case or 0 otherwise.
+ */
+static int device_is_dependent(struct device *dev, void *target)
+{
+ struct device_link *link;
+ int ret;
+
+ if (WARN_ON(dev == target))
+ return 1;
+
+ ret = device_for_each_child(dev, target, device_is_dependent);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (WARN_ON(link->consumer == target))
+ return 1;
+
+ ret = device_is_dependent(link->consumer, target);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+ struct device_link *link;
+
+ /*
+ * Devices that have not been registered yet will be put to the ends
+ * of the lists during the registration, so skip them here.
+ */
+ if (device_is_registered(dev))
+ devices_kset_move_last(dev);
+
+ if (device_pm_initialized(dev))
+ device_pm_move_last(dev);
+
+ device_for_each_child(dev, NULL, device_reorder_to_tail);
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ device_reorder_to_tail(link->consumer, NULL);
+
+ return 0;
+}
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account. Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
+ * when the consumer device driver unbinds from it. The combination of both
+ * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
+ * to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case. The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags)
+{
+ struct device_link *link;
+
+ if (!consumer || !supplier ||
+ ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ return NULL;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ /*
+ * If the supplier has not been fully registered yet or there is a
+ * reverse dependency between the consumer and the supplier already in
+ * the graph, return NULL.
+ */
+ if (!device_pm_initialized(supplier)
+ || device_is_dependent(consumer, supplier)) {
+ link = NULL;
+ goto out;
+ }
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node)
+ if (link->consumer == consumer)
+ goto out;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (flags & DL_FLAG_RPM_ACTIVE) {
+ if (pm_runtime_get_sync(supplier) < 0) {
+ pm_runtime_put_noidle(supplier);
+ kfree(link);
+ link = NULL;
+ goto out;
+ }
+ link->rpm_active = true;
+ }
+ pm_runtime_new_link(consumer);
+ }
+ get_device(supplier);
+ link->supplier = supplier;
+ INIT_LIST_HEAD(&link->s_node);
+ get_device(consumer);
+ link->consumer = consumer;
+ INIT_LIST_HEAD(&link->c_node);
+ link->flags = flags;
+
+ /* Determine the initial link state. */
+ if (flags & DL_FLAG_STATELESS) {
+ link->status = DL_STATE_NONE;
+ } else {
+ switch (supplier->links.status) {
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+ * Balance the decrementation of the supplier's
+ * runtime PM usage counter after consumer probe
+ * in driver_probe_device().
+ */
+ if (flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(supplier);
+
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ link->status = DL_STATE_ACTIVE;
+ break;
+ default:
+ link->status = DL_STATE_AVAILABLE;
+ break;
+ }
+ break;
+ case DL_DEV_UNBINDING:
+ link->status = DL_STATE_SUPPLIER_UNBIND;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ }
+
+ /*
+ * Move the consumer and all of the devices depending on it to the end
+ * of dpm_list and the devices_kset list.
+ *
+ * It is necessary to hold dpm_list locked throughout all that or else
+ * we may end up suspending with a wrong ordering of it.
+ */
+ device_reorder_to_tail(consumer, NULL);
+
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+ dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+ out:
+ device_pm_unlock();
+ device_links_write_unlock();
+ return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
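/*
 * Illustrative consumer-side use of the new API (hypothetical foo driver;
 * foo_get_supplier() is a placeholder, not part of this patch): create a
 * managed link so runtime PM and unbind ordering follow the dependency.
 */
static int foo_probe(struct device *dev)
{
	struct device_link *link;

	link = device_link_add(dev, foo_get_supplier(dev),
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EPROBE_DEFER;	/* e.g. supplier not registered yet */

	/* DL_FLAG_AUTOREMOVE: the link goes away when this driver unbinds */
	return 0;
}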
+
+static void device_link_free(struct device_link *link)
+{
+ put_device(link->consumer);
+ put_device(link->supplier);
+ kfree(link);
+}
+
+#ifdef CONFIG_SRCU
+static void __device_link_free_srcu(struct rcu_head *rhead)
+{
+ device_link_free(container_of(rhead, struct device_link, rcu_head));
+}
+
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_drop_link(link->consumer);
+
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+ call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+}
+#else /* !CONFIG_SRCU */
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+ device_link_free(link);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_link_del - Delete a link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_del(struct device_link *link)
+{
+ device_links_write_lock();
+ device_pm_lock();
+ __device_link_del(link);
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
+
+static void device_links_missing_supplier(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node)
+ if (link->status == DL_STATE_CONSUMER_PROBE)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers. Walk the list of the device's
+ * links to suppliers and see if all of them are available. If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here. It can only go away in __device_release_driver() and
+ * that function checks the device's links to consumers. This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int ret = 0;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE) {
+ device_links_missing_supplier(dev);
+ ret = -EPROBE_DEFER;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_DORMANT);
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+
+ dev->links.status = DL_DEV_DRIVER_BOUND;
+
+ device_links_write_unlock();
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->flags & DL_FLAG_AUTOREMOVE)
+ __device_link_del(link);
+ else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+void device_links_no_driver(struct device *dev)
+{
+ device_links_write_lock();
+ __device_links_no_driver(dev);
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+ __device_links_no_driver(dev);
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present). Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = false;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status == DL_STATE_CONSUMER_PROBE
+ || link->status == DL_STATE_ACTIVE) {
+ ret = true;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ }
+
+ dev->links.status = DL_DEV_UNBINDING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state. If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ start:
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ enum device_link_state status;
+
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ status = link->status;
+ if (status == DL_STATE_CONSUMER_PROBE) {
+ device_links_write_unlock();
+
+ wait_for_device_probe();
+ goto start;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ if (status == DL_STATE_ACTIVE) {
+ struct device *consumer = link->consumer;
+
+ get_device(consumer);
+
+ device_links_write_unlock();
+
+ device_release_driver_internal(consumer, NULL,
+ consumer->parent);
+ put_device(consumer);
+ goto start;
+ }
+ }
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ /*
+ * Delete all of the remaining links from this device to any other
+ * devices (either consumers or suppliers).
+ */
+ device_links_write_lock();
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ WARN_ON(link->status == DL_STATE_ACTIVE);
+ __device_link_del(link);
+ }
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+ WARN_ON(link->status != DL_STATE_DORMANT &&
+ link->status != DL_STATE_NONE);
+ __device_link_del(link);
+ }
+
+ device_links_write_unlock();
+}
+
+/* Device links support end. */
+
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
@@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
+ error = device_create_file(dev, &dev_attr_deferred_probe);
+ if (error)
+ goto err_remove_online;
+
return 0;
+ err_remove_online:
+ device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
#ifdef CONFIG_GENERIC_MSI_IRQ
INIT_LIST_HEAD(&dev->msi_list);
#endif
+ INIT_LIST_HEAD(&dev->links.consumers);
+ INIT_LIST_HEAD(&dev->links.suppliers);
+ dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
+
+ device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97a98b6..a8b258e5407b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static ssize_t deferred_probe_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool value;
+
+ mutex_lock(&deferred_probe_mutex);
+ value = !list_empty(&dev->p->deferred_probe);
+ mutex_unlock(&deferred_probe_mutex);
+
+ return sprintf(buf, "%d\n", value);
+}
+DEVICE_ATTR_RO(deferred_probe);
+
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
@@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
@@ -338,6 +352,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
+ ret = device_links_check_suppliers(dev);
+ if (ret)
+ return ret;
+
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -416,6 +434,7 @@ probe_failed:
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
+ device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
@@ -508,6 +527,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@@ -518,6 +538,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
if (dev->parent)
pm_runtime_put(dev->parent);
+ pm_runtime_put_suppliers(dev);
return ret;
}
@@ -772,7 +793,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
-static void __device_release_driver(struct device *dev)
+static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
@@ -781,7 +802,27 @@ static void __device_release_driver(struct device *dev)
if (driver_allows_async_probing(drv))
async_synchronize_full();
+ while (device_links_busy(dev)) {
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+
+ device_links_unbind_consumers(dev);
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ /*
+ * A concurrent invocation of the same function might
+ * have released the driver successfully while this one
+ * was waiting, so check for that.
+ */
+ if (dev->driver != drv)
+ return;
+ }
+
pm_runtime_get_sync(dev);
+ pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
@@ -796,6 +837,8 @@ static void __device_release_driver(struct device *dev)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
+
+ device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -812,12 +855,32 @@ static void __device_release_driver(struct device *dev)
}
}
+void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent)
+{
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ if (!drv || drv == dev->driver)
+ __device_release_driver(dev, parent);
+
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+}
+
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
@@ -826,9 +889,7 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
- device_lock(dev);
- __device_release_driver(dev);
- device_unlock(dev);
+ device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@@ -853,15 +914,7 @@ void driver_detach(struct device_driver *drv)
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
-
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- if (dev->driver == drv)
- __device_release_driver(dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 240374fd1838..7be310f7db73 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -160,18 +160,20 @@ static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(disabled);
-static struct class_attribute devcd_class_attrs[] = {
- __ATTR_RW(disabled),
- __ATTR_NULL
+static struct attribute *devcd_class_attrs[] = {
+ &class_attr_disabled.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(devcd_class);
static struct class devcd_class = {
.name = "devcoredump",
.owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
- .class_attrs = devcd_class_attrs,
+ .class_groups = devcd_class_groups,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 8fc654f0807b..71d577025285 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/percpu.h>
#include "base.h"
@@ -985,3 +986,68 @@ void devm_free_pages(struct device *dev, unsigned long addr)
&devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+ void __percpu *p;
+
+ p = *(void __percpu **)pdata;
+ free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+ struct devres *devr = container_of(data, struct devres, data);
+
+ return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align)
+{
+ void *p;
+ void __percpu *pcpu;
+
+ pcpu = __alloc_percpu(size, align);
+ if (!pcpu)
+ return NULL;
+
+ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+ if (!p) {
+ free_percpu(pcpu);
+ return NULL;
+ }
+
+ *(void __percpu **)p = pcpu;
+
+ devres_add(dev, p);
+
+ return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+ (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
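A minimal sketch of the new managed per-CPU allocation from a driver's probe path (struct foo_stats and the device are hypothetical, not part of this patch); the memory is released automatically on driver detach, so devm_free_percpu() is only needed for early manual release:

struct foo_stats { u64 rx_packets; u64 tx_packets; };

static int foo_probe(struct device *dev)
{
	struct foo_stats __percpu *stats;

	stats = __devm_alloc_percpu(dev, sizeof(struct foo_stats),
				    __alignof__(struct foo_stats));
	if (!stats)
		return -ENOMEM;

	dev_set_drvdata(dev, stats);	/* freed with the other devres entries */
	return 0;
}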
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 8f8b68c80986..efd71cf4fdea 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -108,13 +108,13 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
+ * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
* @dev: Device to allocate non_coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
*
- * Managed dma_alloc_non_coherent(). Memory allocated using this
+ * Managed dma_alloc_noncoherent(). Memory allocated using this
* function will be automatically released on driver detach.
*
* RETURNS:
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 22d1760a4278..4497d263209f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,6 +30,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -91,10 +92,11 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
}
#endif
-enum {
+enum fw_status {
+ FW_STATUS_UNKNOWN,
FW_STATUS_LOADING,
FW_STATUS_DONE,
- FW_STATUS_ABORT,
+ FW_STATUS_ABORTED,
};
static int loading_timeout = 60; /* In seconds */
@@ -104,6 +106,82 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct swait_queue_head wq;
+ enum fw_status status;
+};
+
+static void fw_state_init(struct fw_state *fw_st)
+{
+ init_swait_queue_head(&fw_st->wq);
+ fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline bool __fw_state_is_done(enum fw_status status)
+{
+ return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
+}
+
+static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+{
+ long ret;
+
+ ret = swait_event_interruptible_timeout(fw_st->wq,
+ __fw_state_is_done(READ_ONCE(fw_st->status)),
+ timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static void __fw_state_set(struct fw_state *fw_st,
+ enum fw_status status)
+{
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ swake_up(&fw_st->wq);
+}
+
+#define fw_state_start(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_LOADING)
+#define fw_state_done(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_wait(fw_st) \
+ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
+
+#ifndef CONFIG_FW_LOADER_USER_HELPER
+
+#define fw_state_is_aborted(fw_st) false
+
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+
+static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+{
+ return fw_st->status == status;
+}
+
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
+#define fw_state_is_done(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_DONE)
+#define fw_state_is_loading(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_LOADING)
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+#define fw_state_wait_timeout(fw_st, timeout) \
+ __fw_state_wait_common(fw_st, timeout)
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
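/*
 * Sketch of how the loading paths below drive this state machine (only
 * helpers added in this patch; 'buf' stands for the shared firmware_buf
 * referenced by concurrent requests):
 *
 *	fw_state_init(&buf->fw_st);	// __allocate_fw_buf()
 *	fw_state_start(&buf->fw_st);	// userspace writes 1 to 'loading'
 *	fw_state_done(&buf->fw_st);	// load complete, wakes all waiters
 *	ret = fw_state_wait(&buf->fw_st); // later request: -ENOENT if aborted
 */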
/* firmware behavior options */
#define FW_OPT_UEVENT (1U << 0)
#define FW_OPT_NOWAIT (1U << 1)
@@ -145,9 +223,8 @@ struct firmware_cache {
struct firmware_buf {
struct kref ref;
struct list_head list;
- struct completion completion;
struct firmware_cache *fwc;
- unsigned long status;
+ struct fw_state fw_st;
void *data;
size_t size;
size_t allocated_size;
@@ -205,7 +282,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
buf->fwc = fwc;
buf->data = dbuf;
buf->allocated_size = size;
- init_completion(&buf->completion);
+ fw_state_init(&buf->fw_st);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&buf->pending_list);
#endif
@@ -305,15 +382,6 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
-static void fw_finish_direct_load(struct device *device,
- struct firmware_buf *buf)
-{
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- mutex_unlock(&fw_lock);
-}
-
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
@@ -360,7 +428,7 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
}
dev_dbg(device, "direct-loading %s\n", buf->fw_id);
buf->size = size;
- fw_finish_direct_load(device, buf);
+ fw_state_done(&buf->fw_st);
break;
}
__putname(path);
@@ -478,12 +546,11 @@ static void __fw_load_abort(struct firmware_buf *buf)
* There is a small window in which user can write to 'loading'
* between loading done and disappearance of 'loading'
*/
- if (test_bit(FW_STATUS_DONE, &buf->status))
+ if (fw_state_is_done(&buf->fw_st))
return;
list_del_init(&buf->pending_list);
- set_bit(FW_STATUS_ABORT, &buf->status);
- complete_all(&buf->completion);
+ fw_state_aborted(&buf->fw_st);
}
static void fw_load_abort(struct firmware_priv *fw_priv)
@@ -496,9 +563,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
fw_priv->buf = NULL;
}
-#define is_fw_load_aborted(buf) \
- test_bit(FW_STATUS_ABORT, &(buf)->status)
-
static LIST_HEAD(pending_fw_head);
/* reboot notifier for avoid deadlock with usermode_lock */
@@ -546,11 +610,13 @@ static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(timeout);
-static struct class_attribute firmware_class_attrs[] = {
- __ATTR_RW(timeout),
- __ATTR_NULL
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(firmware_class);
static void fw_dev_release(struct device *dev)
{
@@ -585,7 +651,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct class firmware_class = {
.name = "firmware",
- .class_attrs = firmware_class_attrs,
+ .class_groups = firmware_class_groups,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -598,7 +664,7 @@ static ssize_t firmware_loading_show(struct device *dev,
mutex_lock(&fw_lock);
if (fw_priv->buf)
- loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+ loading = fw_state_is_loading(&fw_priv->buf->fw_st);
mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
@@ -653,23 +719,20 @@ static ssize_t firmware_loading_store(struct device *dev,
switch (loading) {
case 1:
/* discarding any previous partial load */
- if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+ if (!fw_state_is_done(&fw_buf->fw_st)) {
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
vfree(fw_buf->pages);
fw_buf->pages = NULL;
fw_buf->page_array_size = 0;
fw_buf->nr_pages = 0;
- set_bit(FW_STATUS_LOADING, &fw_buf->status);
+ fw_state_start(&fw_buf->fw_st);
}
break;
case 0:
- if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
+ if (fw_state_is_loading(&fw_buf->fw_st)) {
int rc;
- set_bit(FW_STATUS_DONE, &fw_buf->status);
- clear_bit(FW_STATUS_LOADING, &fw_buf->status);
-
/*
* Several loading requests may be pending on
* one same firmware buf, so let all requests
@@ -691,10 +754,11 @@ static ssize_t firmware_loading_store(struct device *dev,
*/
list_del_init(&fw_buf->pending_list);
if (rc) {
- set_bit(FW_STATUS_ABORT, &fw_buf->status);
+ fw_state_aborted(&fw_buf->fw_st);
written = rc;
+ } else {
+ fw_state_done(&fw_buf->fw_st);
}
- complete_all(&fw_buf->completion);
break;
}
/* fallthrough */
@@ -755,7 +819,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
ret_count = -ENODEV;
goto out;
}
@@ -842,7 +906,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
retval = -ENODEV;
goto out;
}
@@ -955,17 +1019,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
timeout = MAX_JIFFY_OFFSET;
}
- retval = wait_for_completion_interruptible_timeout(&buf->completion,
- timeout);
- if (retval == -ERESTARTSYS || !retval) {
+ retval = fw_state_wait_timeout(&buf->fw_st, timeout);
+ if (retval < 0) {
mutex_lock(&fw_lock);
fw_load_abort(fw_priv);
mutex_unlock(&fw_lock);
- } else if (retval > 0) {
- retval = 0;
}
- if (is_fw_load_aborted(buf))
+ if (fw_state_is_aborted(&buf->fw_st))
retval = -EAGAIN;
else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
@@ -1015,35 +1076,12 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
return -ENOENT;
}
-/* No abort during direct loading */
-#define is_fw_load_aborted(buf) false
-
#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif
#endif /* CONFIG_FW_LOADER_USER_HELPER */
-
-/* wait until the shared firmware_buf becomes ready (or error) */
-static int sync_cached_firmware_buf(struct firmware_buf *buf)
-{
- int ret = 0;
-
- mutex_lock(&fw_lock);
- while (!test_bit(FW_STATUS_DONE, &buf->status)) {
- if (is_fw_load_aborted(buf)) {
- ret = -ENOENT;
- break;
- }
- mutex_unlock(&fw_lock);
- ret = wait_for_completion_interruptible(&buf->completion);
- mutex_lock(&fw_lock);
- }
- mutex_unlock(&fw_lock);
- return ret;
-}
-
/* prepare firmware and firmware_buf structs;
* return 0 if a firmware is already assigned, 1 if need to load one,
* or a negative error code
@@ -1077,7 +1115,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
firmware->priv = buf;
if (ret > 0) {
- ret = sync_cached_firmware_buf(buf);
+ ret = fw_state_wait(&buf->fw_st);
if (!ret) {
fw_set_page_data(buf, firmware);
return 0; /* assigned */
@@ -1095,7 +1133,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
struct firmware_buf *buf = fw->priv;
mutex_lock(&fw_lock);
- if (!buf->size || is_fw_load_aborted(buf)) {
+ if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
mutex_unlock(&fw_lock);
return -ENOENT;
}
@@ -1345,9 +1383,9 @@ static void request_firmware_work_func(struct work_struct *work)
*
* Asynchronous variant of request_firmware() for user contexts:
* - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
*
* - can't sleep at all if @gfp is GFP_ATOMIC.
**/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 62c63c0c5c22..8ab8ea1253e6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static DEFINE_MUTEX(mem_sysfs_mutex);
@@ -226,11 +226,9 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
int ret;
start_pfn = section_nr_to_pfn(phys_index);
- first_page = pfn_to_page(start_pfn);
switch (action) {
case MEM_ONLINE:
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e023066e4215..a5e1262b964b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -39,6 +39,105 @@
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
+struct genpd_lock_ops {
+ void (*lock)(struct generic_pm_domain *genpd);
+ void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
+ int (*lock_interruptible)(struct generic_pm_domain *genpd);
+ void (*unlock)(struct generic_pm_domain *genpd);
+};
+
+static void genpd_lock_mtx(struct generic_pm_domain *genpd)
+{
+ mutex_lock(&genpd->mlock);
+}
+
+static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
+ int depth)
+{
+ mutex_lock_nested(&genpd->mlock, depth);
+}
+
+static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_lock_interruptible(&genpd->mlock);
+}
+
+static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_unlock(&genpd->mlock);
+}
+
+static const struct genpd_lock_ops genpd_mtx_ops = {
+ .lock = genpd_lock_mtx,
+ .lock_nested = genpd_lock_nested_mtx,
+ .lock_interruptible = genpd_lock_interruptible_mtx,
+ .unlock = genpd_unlock_mtx,
+};
+
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+ genpd->lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->slock)
+{
+ spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_spin_ops = {
+ .lock = genpd_lock_spin,
+ .lock_nested = genpd_lock_nested_spin,
+ .lock_interruptible = genpd_lock_interruptible_spin,
+ .unlock = genpd_unlock_spin,
+};
+
+#define genpd_lock(p) p->lock_ops->lock(p)
+#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
+#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
+#define genpd_unlock(p) p->lock_ops->unlock(p)
+
+#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
+
+static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+ struct generic_pm_domain *genpd)
+{
+ bool ret;
+
+ ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+
+ /* Warn once for each IRQ safe dev in no sleep domain */
+ if (ret)
+ dev_warn_once(dev, "PM domain %s will not be powered off\n",
+ genpd->name);
+
+ return ret;
+}
+
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
@@ -200,9 +299,9 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
genpd_sd_counter_inc(master);
- mutex_lock_nested(&master->lock, depth + 1);
+ genpd_lock_nested(master, depth + 1);
ret = genpd_poweron(master, depth + 1);
- mutex_unlock(&master->lock);
+ genpd_unlock(master);
if (ret) {
genpd_sd_counter_dec(master);
@@ -255,9 +354,9 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
spin_unlock_irq(&dev->power.lock);
if (!IS_ERR(genpd)) {
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->max_off_time_changed = true;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
dev = dev->parent;
@@ -303,7 +402,12 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
- if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
+ /*
+ * Do not allow PM domain to be powered off, when an IRQ safe
+ * device is part of a non-IRQ safe domain.
+ */
+ if (!pm_runtime_suspended(pdd->dev) ||
+ irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
not_suspended++;
}
@@ -354,9 +458,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd_poweroff(genpd, true);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
/**
@@ -440,6 +544,7 @@ static int genpd_runtime_suspend(struct device *dev)
return -EBUSY;
/* Measure suspend latency. */
+ time_start = 0;
if (runtime_pm)
time_start = ktime_get();
@@ -466,15 +571,15 @@ static int genpd_runtime_suspend(struct device *dev)
}
/*
- * If power.irq_safe is set, this routine will be run with interrupts
- * off, so it can't use mutexes.
+ * If power.irq_safe is set, this routine may be run with
+ * IRQs disabled, so suspend only if the PM domain also is irq_safe.
*/
- if (dev->power.irq_safe)
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
return 0;
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd_poweroff(genpd, false);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
return 0;
}
@@ -503,15 +608,18 @@ static int genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- /* If power.irq_safe, the PM domain is never powered off. */
- if (dev->power.irq_safe) {
+ /*
+ * As we don't power off a non IRQ safe domain, which holds
+ * an IRQ safe device, we don't need to restore power to it.
+ */
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
timed = false;
goto out;
}
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
ret = genpd_poweron(genpd, 0);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
if (ret)
return ret;
@@ -546,10 +654,11 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!dev->power.irq_safe) {
- mutex_lock(&genpd->lock);
+ if (!pm_runtime_is_irq_safe(dev) ||
+ (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ genpd_lock(genpd);
genpd_poweroff(genpd, 0);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
return ret;
@@ -732,20 +841,20 @@ static int pm_genpd_prepare(struct device *dev)
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count++ == 0)
genpd->suspended_count = 0;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
ret = pm_generic_prepare(dev);
if (ret) {
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->prepared_count--;
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
return ret;
@@ -936,13 +1045,13 @@ static void pm_genpd_complete(struct device *dev)
pm_generic_complete(dev);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
genpd->prepared_count--;
if (!genpd->prepared_count)
genpd_queue_power_off_work(genpd);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
}
/**
@@ -1071,7 +1180,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1088,7 +1197,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
out:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
if (ret)
genpd_free_dev_data(dev, gpd_data);
@@ -1130,7 +1239,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1145,14 +1254,14 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
list_del_init(&pdd->list_node);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
@@ -1183,12 +1292,23 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
|| genpd == subdomain)
return -EINVAL;
+ /*
+	 * If the subdomain can be powered on/off in an IRQ safe
+	 * context, its parent must be able to be powered on/off
+	 * in that context as well.
+ */
+ if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+ WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+ genpd->name, subdomain->name);
+ return -EINVAL;
+ }
+
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
- mutex_lock(&subdomain->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1211,8 +1331,8 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
genpd_sd_counter_inc(genpd);
out:
- mutex_unlock(&genpd->lock);
- mutex_unlock(&subdomain->lock);
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
if (ret)
kfree(link);
return ret;
@@ -1250,8 +1370,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
- mutex_lock(&subdomain->lock);
- mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1275,13 +1395,39 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
out:
- mutex_unlock(&genpd->lock);
- mutex_unlock(&subdomain->lock);
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+{
+ struct genpd_power_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ genpd->states = state;
+ genpd->state_count = 1;
+ genpd->free = state;
+
+ return 0;
+}
+
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+ if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+ spin_lock_init(&genpd->slock);
+ genpd->lock_ops = &genpd_spin_ops;
+ } else {
+ mutex_init(&genpd->mlock);
+ genpd->lock_ops = &genpd_mtx_ops;
+ }
+}
+
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -1293,13 +1439,15 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off)
{
+ int ret;
+
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->slave_links);
INIT_LIST_HEAD(&genpd->dev_list);
- mutex_init(&genpd->lock);
+ genpd_lock_init(genpd);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
@@ -1325,19 +1473,12 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->dev_ops.start = pm_clk_resume;
}
- if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
- pr_warn("Initial state index out of bounds.\n");
- genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
- }
-
- if (genpd->state_count > GENPD_MAX_NUM_STATES) {
- pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
- genpd->state_count = GENPD_MAX_NUM_STATES;
- }
-
/* Use only one "off" state if there were no states declared */
- if (genpd->state_count == 0)
- genpd->state_count = 1;
+ if (genpd->state_count == 0) {
+ ret = genpd_set_default_power_state(genpd);
+ if (ret)
+ return ret;
+ }
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
@@ -1354,16 +1495,16 @@ static int genpd_remove(struct generic_pm_domain *genpd)
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ genpd_lock(genpd);
if (genpd->has_provider) {
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
pr_err("Provider present, unable to remove %s\n", genpd->name);
return -EBUSY;
}
if (!list_empty(&genpd->master_links) || genpd->device_count) {
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
@@ -1375,8 +1516,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
}
list_del(&genpd->gpd_list_node);
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
cancel_work_sync(&genpd->power_off_work);
+ kfree(genpd->free);
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
@@ -1890,21 +2032,117 @@ int genpd_dev_pm_attach(struct device *dev)
mutex_unlock(&gpd_list_lock);
if (ret < 0) {
- dev_err(dev, "failed to add to PM domain %s: %d",
- pd->name, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to add to PM domain %s: %d",
+ pd->name, ret);
goto out;
}
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- mutex_lock(&pd->lock);
+ genpd_lock(pd);
ret = genpd_poweron(pd, 0);
- mutex_unlock(&pd->lock);
+ genpd_unlock(pd);
out:
return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
+
+static const struct of_device_id idle_state_match[] = {
+ { .compatible = "domain-idle-state", },
+ { }
+};
+
+static int genpd_parse_state(struct genpd_power_state *genpd_state,
+ struct device_node *state_node)
+{
+ int err;
+ u32 residency;
+ u32 entry_latency, exit_latency;
+ const struct of_device_id *match_id;
+
+ match_id = of_match_node(idle_state_match, state_node);
+ if (!match_id)
+ return -EINVAL;
+
+ err = of_property_read_u32(state_node, "entry-latency-us",
+ &entry_latency);
+ if (err) {
+ pr_debug(" * %s missing entry-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "exit-latency-us",
+ &exit_latency);
+ if (err) {
+ pr_debug(" * %s missing exit-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "min-residency-us", &residency);
+ if (!err)
+ genpd_state->residency_ns = 1000 * residency;
+
+ genpd_state->power_on_latency_ns = 1000 * exit_latency;
+ genpd_state->power_off_latency_ns = 1000 * entry_latency;
+ genpd_state->fwnode = &state_node->fwnode;
+
+ return 0;
+}
+
+/**
+ * of_genpd_parse_idle_states() - Return array of idle states for the genpd.
+ *
+ * @dn: The genpd device node
+ * @states: The pointer to which the state array will be saved.
+ * @n: The count of elements in the array returned from this function.
+ *
+ * Returns the device states parsed from the OF node. The memory for the states
+ * is allocated by this function, and it is the caller's responsibility to free
+ * it after use.
+ */
+int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n)
+{
+ struct genpd_power_state *st;
+ struct device_node *np;
+ int i = 0;
+ int err, ret;
+ int count;
+ struct of_phandle_iterator it;
+
+ count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+ if (count <= 0)
+ return -EINVAL;
+
+ st = kcalloc(count, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+	/* Loop over the phandles until all the requested entries are found */
+ of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
+ np = it.node;
+ ret = genpd_parse_state(&st[i++], np);
+ if (ret) {
+			pr_err("Parsing idle state node %s failed with err %d\n",
+			       np->full_name, ret);
+ of_node_put(np);
+ kfree(st);
+ return ret;
+ }
+ }
+
+ *n = count;
+ *states = st;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
+
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
@@ -1958,7 +2196,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
char state[16];
int ret;
- ret = mutex_lock_interruptible(&genpd->lock);
+ ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
@@ -1984,7 +2222,9 @@ static int pm_genpd_summary_one(struct seq_file *s,
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
@@ -1995,7 +2235,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
seq_puts(s, "\n");
exit:
- mutex_unlock(&genpd->lock);
+ genpd_unlock(genpd);
return 0;
}
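
The domain.c changes above switch genpd locking to per-domain lock operations (a spinlock for GENPD_FLAG_IRQ_SAFE domains, a mutex otherwise) and add of_genpd_parse_idle_states() for pulling domain idle states out of DT. Below is a minimal, hedged sketch of how a platform PM-domain driver might use the new pieces; it is not part of this patch, and the "my_*" names and the NULL governor are illustrative only.

#include <linux/pm_domain.h>
#include <linux/of.h>

static struct generic_pm_domain my_pd = {
	.name  = "my_pd",
	.flags = GENPD_FLAG_IRQ_SAFE,	/* genpd_lock_init() picks the spinlock ops */
};

static int my_pd_register(struct device_node *np)
{
	struct genpd_power_state *states;
	int state_count, ret;

	/* Parse the domain-idle-states phandles introduced by this series */
	ret = of_genpd_parse_idle_states(np, &states, &state_count);
	if (ret)
		return ret;

	my_pd.states = states;
	my_pd.state_count = state_count;

	/* Register the domain, initially powered off */
	return pm_genpd_init(&my_pd, NULL, true);
}

The parsed states array is owned by the caller, so a real driver would keep the pointer around and free it if registration fails.
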
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2932a5bd892f..249e0304597f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
+ dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
@@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
+ dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
@@ -192,7 +194,7 @@ void device_pm_move_last(struct device *dev)
static ktime_t initcall_debug_start(struct device *dev)
{
- ktime_t calltime = ktime_set(0, 0);
+ ktime_t calltime = 0;
if (pm_print_times_enabled) {
pr_info("calling %s+ @ %i, parent: %s\n",
@@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
device_for_each_child(dev, &async, dpm_wait_fn);
}
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * If the supplier goes away right after we've checked the link to it,
+ * we'll wait for its completion to change the state, but that's fine,
+ * because the only things that will block as a result are the SRCU
+ * callbacks freeing the link objects for the links in the list we're
+ * walking.
+ */
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->supplier, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_superior(struct device *dev, bool async)
+{
+ dpm_wait(dev->parent, async);
+ dpm_wait_for_suppliers(dev, async);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * The status of a device link can only be changed from "dormant" by a
+ * probe, but that cannot happen during system suspend/resume. In
+ * theory it can change to "dormant" at that time, but then it is
+	 * reasonable to wait for the target device anyway (e.g. if it goes
+ * away, it's better to wait for it to go away completely and then
+ * continue instead of trying to continue in parallel with its
+ * unregistration).
+ */
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->consumer, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+ dpm_wait_for_children(dev, async);
+ dpm_wait_for_consumers(dev, async);
+}
+
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
@@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1027,7 +1085,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1174,7 +1232,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1342,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static void dpm_clear_suppliers_direct_complete(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ spin_lock_irq(&link->supplier->power.lock);
+ link->supplier->power.direct_complete = false;
+ spin_unlock_irq(&link->supplier->power.lock);
+ }
+
+ device_links_read_unlock(idx);
+}
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1358,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1454,16 +1528,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
+ dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);
dpm_watchdog_clear(&wd);
Complete:
- complete_all(&dev->power.completion);
if (error)
async_error = error;
+ complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
return error;
}
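
The main.c hunks above make system suspend/resume ordering honour device links in addition to the parent/child tree: resume waits for suppliers (dpm_wait_for_superior()), suspend waits for consumers (dpm_wait_for_subordinate()), and direct_complete is cleared for suppliers of devices that ran full suspend. As a hedged illustration only, a consumer driver that creates such a link (using the device-links core added elsewhere in this series) could look like this; the supplier lookup is assumed to happen elsewhere.

#include <linux/device.h>

static int my_consumer_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* Managed link: runtime PM and system sleep now order the two devices */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -ENODEV;

	return 0;
}
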
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 4c7c6da7a989..35ff06283738 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -93,6 +93,8 @@ struct opp_table *_find_opp_table(struct device *dev)
* Return: voltage in micro volt corresponding to the opp, else
* return 0
*
+ * This is useful only for devices with a single power supply.
+ *
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. This means that opp which could have been fetched by
* opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
@@ -112,7 +114,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
if (IS_ERR_OR_NULL(tmp_opp))
pr_err("%s: Invalid parameters\n", __func__);
else
- v = tmp_opp->u_volt;
+ v = tmp_opp->supplies[0].u_volt;
return v;
}
@@ -210,6 +212,24 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
+static int _get_regulator_count(struct device *dev)
+{
+ struct opp_table *opp_table;
+ int count;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (!IS_ERR(opp_table))
+ count = opp_table->regulator_count;
+ else
+ count = 0;
+
+ rcu_read_unlock();
+
+ return count;
+}
+
/**
* dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
* @dev: device for which we do this operation
@@ -222,34 +242,51 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
struct opp_table *opp_table;
struct dev_pm_opp *opp;
- struct regulator *reg;
+ struct regulator *reg, **regulators;
unsigned long latency_ns = 0;
- unsigned long min_uV = ~0, max_uV = 0;
- int ret;
+ int ret, i, count;
+ struct {
+ unsigned long min;
+ unsigned long max;
+ } *uV;
+
+ count = _get_regulator_count(dev);
+
+ /* Regulator may not be required for the device */
+ if (!count)
+ return 0;
+
+ regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
+ if (!regulators)
+ return 0;
+
+ uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
+ if (!uV)
+ goto free_regulators;
rcu_read_lock();
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
rcu_read_unlock();
- return 0;
+ goto free_uV;
}
- reg = opp_table->regulator;
- if (IS_ERR(reg)) {
- /* Regulator may not be required for device */
- rcu_read_unlock();
- return 0;
- }
+ memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
- list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
- if (!opp->available)
- continue;
+ for (i = 0; i < count; i++) {
+ uV[i].min = ~0;
+ uV[i].max = 0;
+
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
- if (opp->u_volt_min < min_uV)
- min_uV = opp->u_volt_min;
- if (opp->u_volt_max > max_uV)
- max_uV = opp->u_volt_max;
+ if (opp->supplies[i].u_volt_min < uV[i].min)
+ uV[i].min = opp->supplies[i].u_volt_min;
+ if (opp->supplies[i].u_volt_max > uV[i].max)
+ uV[i].max = opp->supplies[i].u_volt_max;
+ }
}
rcu_read_unlock();
@@ -258,9 +295,16 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
* The caller needs to ensure that opp_table (and hence the regulator)
* isn't freed, while we are executing this routine.
*/
- ret = regulator_set_voltage_time(reg, min_uV, max_uV);
- if (ret > 0)
- latency_ns = ret * 1000;
+ for (i = 0; reg = regulators[i], i < count; i++) {
+ ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
+ if (ret > 0)
+ latency_ns += ret * 1000;
+ }
+
+free_uV:
+ kfree(uV);
+free_regulators:
+ kfree(regulators);
return latency_ns;
}
@@ -542,8 +586,7 @@ unlock:
}
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
- unsigned long u_volt, unsigned long u_volt_min,
- unsigned long u_volt_max)
+ struct dev_pm_opp_supply *supply)
{
int ret;
@@ -554,14 +597,78 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return 0;
}
- dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
- u_volt, u_volt_max);
+ dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
+ supply->u_volt_min, supply->u_volt, supply->u_volt_max);
- ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
- u_volt_max);
+ ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
+ supply->u_volt, supply->u_volt_max);
if (ret)
dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
- __func__, u_volt_min, u_volt, u_volt_max, ret);
+ __func__, supply->u_volt_min, supply->u_volt,
+ supply->u_volt_max, ret);
+
+ return ret;
+}
+
+static inline int
+_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+ unsigned long old_freq, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ }
+
+ return ret;
+}
+
+static int _generic_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
+ struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct regulator *reg = data->regulators[0];
+	struct device *dev = data->dev;
+ int ret;
+
+	/* This function only supports a single regulator per device */
+ if (WARN_ON(data->regulator_count > 1)) {
+ dev_err(dev, "multiple regulators are not supported\n");
+ return -EINVAL;
+ }
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _set_opp_voltage(dev, reg, new_supply);
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
+ if (ret)
+ goto restore_voltage;
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _set_opp_voltage(dev, reg, new_supply);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply->u_volt)
+ _set_opp_voltage(dev, reg, old_supply);
return ret;
}
@@ -579,12 +686,13 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
+ unsigned long freq, old_freq;
+ int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_opp *old_opp, *opp;
- struct regulator *reg;
+ struct regulator **regulators;
+ struct dev_pm_set_opp_data *data;
struct clk *clk;
- unsigned long freq, old_freq;
- unsigned long u_volt, u_volt_min, u_volt_max;
- int ret;
+ int ret, size;
if (unlikely(!target_freq)) {
dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
@@ -633,55 +741,41 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
return ret;
}
- u_volt = opp->u_volt;
- u_volt_min = opp->u_volt_min;
- u_volt_max = opp->u_volt_max;
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
+ old_freq, freq);
- reg = opp_table->regulator;
+ regulators = opp_table->regulators;
- rcu_read_unlock();
-
- /* Scaling up? Scale voltage before frequency */
- if (freq > old_freq) {
- ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
- u_volt_max);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
-
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
- __func__, old_freq, freq);
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- goto restore_voltage;
+ /* Only frequency scaling */
+ if (!regulators) {
+ rcu_read_unlock();
+ return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
}
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
- ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
- u_volt_max);
- if (ret)
- goto restore_freq;
- }
+ if (opp_table->set_opp)
+ set_opp = opp_table->set_opp;
+ else
+ set_opp = _generic_set_opp;
+
+ data = opp_table->set_opp_data;
+ data->regulators = regulators;
+ data->regulator_count = opp_table->regulator_count;
+ data->clk = clk;
+ data->dev = dev;
+
+ data->old_opp.rate = old_freq;
+ size = sizeof(*opp->supplies) * opp_table->regulator_count;
+ if (IS_ERR(old_opp))
+ memset(data->old_opp.supplies, 0, size);
+ else
+ memcpy(data->old_opp.supplies, old_opp->supplies, size);
- return 0;
+ data->new_opp.rate = freq;
+ memcpy(data->new_opp.supplies, opp->supplies, size);
-restore_freq:
- if (clk_set_rate(clk, old_freq))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- if (!IS_ERR(old_opp))
- _set_opp_voltage(dev, reg, old_opp->u_volt,
- old_opp->u_volt_min, old_opp->u_volt_max);
+ rcu_read_unlock();
- return ret;
+ return set_opp(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
@@ -764,9 +858,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
_of_init_opp_table(opp_table, dev);
- /* Set regulator to a non-NULL error value */
- opp_table->regulator = ERR_PTR(-ENXIO);
-
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
if (IS_ERR(opp_table->clk)) {
@@ -815,7 +906,10 @@ static void _remove_opp_table(struct opp_table *opp_table)
if (opp_table->prop_name)
return;
- if (!IS_ERR(opp_table->regulator))
+ if (opp_table->regulators)
+ return;
+
+ if (opp_table->set_opp)
return;
/* Release clk */
@@ -924,34 +1018,50 @@ struct dev_pm_opp *_allocate_opp(struct device *dev,
struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
+ int count, supply_size;
+ struct opp_table *table;
- /* allocate new OPP node */
- opp = kzalloc(sizeof(*opp), GFP_KERNEL);
- if (!opp)
+ table = _add_opp_table(dev);
+ if (!table)
return NULL;
- INIT_LIST_HEAD(&opp->node);
+ /* Allocate space for at least one supply */
+ count = table->regulator_count ? table->regulator_count : 1;
+ supply_size = sizeof(*opp->supplies) * count;
- *opp_table = _add_opp_table(dev);
- if (!*opp_table) {
- kfree(opp);
+ /* allocate new OPP node and supplies structures */
+ opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
+ if (!opp) {
+ kfree(table);
return NULL;
}
+ /* Put the supplies at the end of the OPP structure as an empty array */
+ opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+ INIT_LIST_HEAD(&opp->node);
+
+ *opp_table = table;
+
return opp;
}
static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
struct opp_table *opp_table)
{
- struct regulator *reg = opp_table->regulator;
-
- if (!IS_ERR(reg) &&
- !regulator_is_supported_voltage(reg, opp->u_volt_min,
- opp->u_volt_max)) {
- pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
- __func__, opp->u_volt_min, opp->u_volt_max);
- return false;
+ struct regulator *reg;
+ int i;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+
+ if (!regulator_is_supported_voltage(reg,
+ opp->supplies[i].u_volt_min,
+ opp->supplies[i].u_volt_max)) {
+ pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+ __func__, opp->supplies[i].u_volt_min,
+ opp->supplies[i].u_volt_max);
+ return false;
+ }
}
return true;
@@ -983,11 +1093,13 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->u_volt, opp->available,
- new_opp->rate, new_opp->u_volt, new_opp->available);
+ __func__, opp->rate, opp->supplies[0].u_volt,
+ opp->available, new_opp->rate,
+ new_opp->supplies[0].u_volt, new_opp->available);
- return opp->available && new_opp->u_volt == opp->u_volt ?
- 0 : -EEXIST;
+	/* Should we compare voltages for all regulators here? */
+ return opp->available &&
+ new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
}
new_opp->opp_table = opp_table;
@@ -1054,9 +1166,9 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
/* populate the opp table */
new_opp->rate = freq;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
- new_opp->u_volt = u_volt;
- new_opp->u_volt_min = u_volt - tol;
- new_opp->u_volt_max = u_volt + tol;
+ new_opp->supplies[0].u_volt = u_volt;
+ new_opp->supplies[0].u_volt_min = u_volt - tol;
+ new_opp->supplies[0].u_volt_max = u_volt + tol;
new_opp->available = true;
new_opp->dynamic = dynamic;
@@ -1300,13 +1412,47 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
+static int _allocate_set_opp_data(struct opp_table *opp_table)
+{
+ struct dev_pm_set_opp_data *data;
+ int len, count = opp_table->regulator_count;
+
+ if (WARN_ON(!count))
+ return -EINVAL;
+
+ /* space for set_opp_data */
+ len = sizeof(*data);
+
+ /* space for old_opp.supplies and new_opp.supplies */
+ len += 2 * sizeof(struct dev_pm_opp_supply) * count;
+
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->old_opp.supplies = (void *)(data + 1);
+ data->new_opp.supplies = data->old_opp.supplies + count;
+
+ opp_table->set_opp_data = data;
+
+ return 0;
+}
+
+static void _free_set_opp_data(struct opp_table *opp_table)
+{
+ kfree(opp_table->set_opp_data);
+ opp_table->set_opp_data = NULL;
+}
+
/**
- * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * dev_pm_opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
- * @name: Name of the regulator.
+ * @names: Array of pointers to the names of the regulators.
+ * @count: Number of regulators.
*
* In order to support OPP switching, OPP layer needs to know the name of the
- * device's regulator, as the core would be required to switch voltages as well.
+ * device's regulators, as the core would be required to switch voltages as
+ * well.
*
* This must be called before any OPPs are initialized for the device.
*
@@ -1316,11 +1462,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[],
+ unsigned int count)
{
struct opp_table *opp_table;
struct regulator *reg;
- int ret;
+ int ret, i;
mutex_lock(&opp_table_lock);
@@ -1336,22 +1484,146 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name)
goto err;
}
- /* Already have a regulator set */
- if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+ /* Already have regulators set */
+ if (opp_table->regulators) {
ret = -EBUSY;
goto err;
}
- /* Allocate the regulator */
- reg = regulator_get_optional(dev, name);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%s: no regulator (%s) found: %d\n",
- __func__, name, ret);
+
+ opp_table->regulators = kmalloc_array(count,
+ sizeof(*opp_table->regulators),
+ GFP_KERNEL);
+ if (!opp_table->regulators) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < count; i++) {
+ reg = regulator_get_optional(dev, names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: no regulator (%s) found: %d\n",
+ __func__, names[i], ret);
+ goto free_regulators;
+ }
+
+ opp_table->regulators[i] = reg;
+ }
+
+ opp_table->regulator_count = count;
+
+ /* Allocate block only once to pass to set_opp() routines */
+ ret = _allocate_set_opp_data(opp_table);
+ if (ret)
+ goto free_regulators;
+
+ mutex_unlock(&opp_table_lock);
+ return opp_table;
+
+free_regulators:
+ while (i != 0)
+ regulator_put(opp_table->regulators[--i]);
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+ opp_table->regulator_count = 0;
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
+
+/**
+ * dev_pm_opp_put_regulators() - Releases resources blocked for regulators
+ * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+{
+ int i;
+
+ mutex_lock(&opp_table_lock);
+
+ if (!opp_table->regulators) {
+ pr_err("%s: Doesn't have regulators set\n", __func__);
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
+ regulator_put(opp_table->regulators[i]);
+
+ _free_set_opp_data(opp_table);
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+ opp_table->regulator_count = 0;
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
+
+/**
+ * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * @dev: Device for which the helper is getting registered.
+ * @set_opp: Custom set OPP helper.
+ *
+ * This lets complex platforms (e.g. platforms with multiple regulators per
+ * device) provide their own OPP transition routine instead of the generic
+ * OPP set-rate helper.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ struct opp_table *opp_table;
+ int ret;
+
+ if (!set_opp)
+ return -EINVAL;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
goto err;
}
- opp_table->regulator = reg;
+ /* Already have custom set_opp helper */
+ if (WARN_ON(opp_table->set_opp)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->set_opp = set_opp;
mutex_unlock(&opp_table_lock);
return 0;
@@ -1363,11 +1635,12 @@ unlock:
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
/**
- * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
- * @dev: Device for which regulator was set.
+ * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
+ * set_opp helper
+ * @dev: Device for which custom set_opp helper has to be cleared.
*
* Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -1375,7 +1648,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-void dev_pm_opp_put_regulator(struct device *dev)
+void dev_pm_opp_register_put_opp_helper(struct device *dev)
{
struct opp_table *opp_table;
@@ -1389,16 +1662,16 @@ void dev_pm_opp_put_regulator(struct device *dev)
goto unlock;
}
- if (IS_ERR(opp_table->regulator)) {
- dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+ if (!opp_table->set_opp) {
+ dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
+ __func__);
goto unlock;
}
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- regulator_put(opp_table->regulator);
- opp_table->regulator = ERR_PTR(-ENXIO);
+ opp_table->set_opp = NULL;
/* Try freeing opp_table if this was the last blocking resource */
_remove_opp_table(opp_table);
@@ -1406,7 +1679,7 @@ void dev_pm_opp_put_regulator(struct device *dev)
unlock:
mutex_unlock(&opp_table_lock);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
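
With the opp/core.c changes above, a device may register several supply regulators and, when the generic single-regulator path is not enough, a platform-specific set_opp() callback. A hedged usage sketch follows; the supply names, the "my_*" identifiers and the empty callback body are illustrative assumptions, not taken from this patch.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

static int my_set_opp(struct dev_pm_set_opp_data *data)
{
	/*
	 * Program data->regulators[] and data->clk in whatever order the
	 * platform requires, using data->old_opp/new_opp supplies.
	 */
	return 0;
}

static int my_opp_setup(struct device *dev)
{
	static const char * const names[] = { "vdd", "vbb" };
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_set_regulators(dev, names, ARRAY_SIZE(names));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* More than one regulator, so the generic helper cannot be used */
	ret = dev_pm_opp_register_set_opp_helper(dev, my_set_opp);
	if (ret)
		dev_pm_opp_put_regulators(opp_table);

	return ret;
}
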
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
index ef1ae6b52042..95f433db4ac7 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/base/power/opp/debugfs.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/limits.h>
+#include <linux/slab.h>
#include "opp.h"
@@ -34,6 +35,46 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
+static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ struct dentry *d;
+ int i = 0;
+ char *name;
+
+ /* Always create at least supply-0 directory */
+ do {
+ name = kasprintf(GFP_KERNEL, "supply-%d", i);
+
+ /* Create per-opp directory */
+ d = debugfs_create_dir(name, pdentry);
+
+ kfree(name);
+
+ if (!d)
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+ &opp->supplies[i].u_volt))
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+ &opp->supplies[i].u_volt_min))
+ return false;
+
+ if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+ &opp->supplies[i].u_volt_max))
+ return false;
+
+ if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
+ &opp->supplies[i].u_amp))
+ return false;
+ } while (++i < opp_table->regulator_count);
+
+ return true;
+}
+
int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
struct dentry *pdentry = opp_table->dentry;
@@ -63,16 +104,7 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
return -ENOMEM;
- if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+ if (!opp_debug_create_supplies(opp, opp_table, d))
return -ENOMEM;
if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 5552211e6fcd..3f7d2591b173 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include <linux/export.h>
#include "opp.h"
@@ -101,16 +102,16 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
return true;
}
-/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
struct opp_table *opp_table)
{
- u32 microvolt[3] = {0};
- u32 val;
- int count, ret;
+ u32 *microvolt, *microamp = NULL;
+ int supplies, vcount, icount, ret, i, j;
struct property *prop = NULL;
char name[NAME_MAX];
+ supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
+
/* Search for "opp-microvolt-<name>" */
if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -128,34 +129,29 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
return 0;
}
- count = of_property_count_u32_elems(opp->np, name);
- if (count < 0) {
+ vcount = of_property_count_u32_elems(opp->np, name);
+ if (vcount < 0) {
dev_err(dev, "%s: Invalid %s property (%d)\n",
- __func__, name, count);
- return count;
+ __func__, name, vcount);
+ return vcount;
}
- /* There can be one or three elements here */
- if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
- __func__, name, count);
+ /* There can be one or three elements per supply */
+ if (vcount != supplies && vcount != supplies * 3) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+ __func__, name, vcount, supplies);
return -EINVAL;
}
- ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+ microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
+ if (!microvolt)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
if (ret) {
dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
- return -EINVAL;
- }
-
- opp->u_volt = microvolt[0];
-
- if (count == 1) {
- opp->u_volt_min = opp->u_volt;
- opp->u_volt_max = opp->u_volt;
- } else {
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
+ ret = -EINVAL;
+ goto free_microvolt;
}
/* Search for "opp-microamp-<name>" */
@@ -172,10 +168,59 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
prop = of_find_property(opp->np, name, NULL);
}
- if (prop && !of_property_read_u32(opp->np, name, &val))
- opp->u_amp = val;
+ if (prop) {
+ icount = of_property_count_u32_elems(opp->np, name);
+ if (icount < 0) {
+ dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
+ name, icount);
+ ret = icount;
+ goto free_microvolt;
+ }
- return 0;
+ if (icount != supplies) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+ __func__, name, icount, supplies);
+ ret = -EINVAL;
+ goto free_microvolt;
+ }
+
+ microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
+ if (!microamp) {
+			ret = -ENOMEM;
+ goto free_microvolt;
+ }
+
+ ret = of_property_read_u32_array(opp->np, name, microamp,
+ icount);
+ if (ret) {
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__,
+ name, ret);
+ ret = -EINVAL;
+ goto free_microamp;
+ }
+ }
+
+ for (i = 0, j = 0; i < supplies; i++) {
+ opp->supplies[i].u_volt = microvolt[j++];
+
+ if (vcount == supplies) {
+ opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
+ opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
+ } else {
+ opp->supplies[i].u_volt_min = microvolt[j++];
+ opp->supplies[i].u_volt_max = microvolt[j++];
+ }
+
+ if (microamp)
+ opp->supplies[i].u_amp = microamp[i];
+ }
+
+free_microamp:
+ kfree(microamp);
+free_microvolt:
+ kfree(microvolt);
+
+ return ret;
}
/**
@@ -198,7 +243,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
+static struct device_node *_of_get_opp_desc_node(struct device *dev)
{
/*
* TODO: Support for multiple OPP tables.
@@ -303,9 +348,9 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
mutex_unlock(&opp_table_lock);
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
- __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
- new_opp->u_volt_min, new_opp->u_volt_max,
- new_opp->clock_latency_ns);
+ __func__, new_opp->turbo, new_opp->rate,
+ new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
+ new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
/*
* Notify the changes in the availability of the operable
@@ -562,7 +607,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
np = _of_get_opp_desc_node(cpu_dev);
if (!np) {
- dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
return -ENOENT;
}
@@ -587,7 +632,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
tmp_np = _of_get_opp_desc_node(tcpu_dev);
if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
__func__);
ret = -ENOENT;
goto put_cpu_node;
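
The opp_parse_supplies() rework above expects opp-microvolt to carry one value (or one <target min max> triplet) per registered supply and opp-microamp one value per supply, both in the order the regulators were passed to dev_pm_opp_set_regulators(). A small illustrative layout (invented values, two hypothetical supplies) is:

#include <linux/types.h>

/* opp-microvolt = <vdd target min max>, <vbb target min max>; */
static const u32 example_opp_microvolt[] = {
	/* supply 0 ("vdd") */ 1100000, 1050000, 1150000,
	/* supply 1 ("vbb") */ 1300000, 1250000, 1350000,
};

/* opp-microamp = <vdd current>, <vbb current>; */
static const u32 example_opp_microamp[] = { 70000, 0 };
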
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index fabd5ca1a083..af9f2b849a66 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -61,10 +61,7 @@ extern struct list_head opp_tables;
* @turbo: true if turbo (boost) OPP
* @suspend: true if suspend OPP
* @rate: Frequency in hertz
- * @u_volt: Target voltage in microvolts corresponding to this OPP
- * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
- * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
- * @u_amp: Maximum current drawn by the device in microamperes
+ * @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
* @opp_table: points back to the opp_table struct this opp belongs to
@@ -83,10 +80,8 @@ struct dev_pm_opp {
bool suspend;
unsigned long rate;
- unsigned long u_volt;
- unsigned long u_volt_min;
- unsigned long u_volt_max;
- unsigned long u_amp;
+ struct dev_pm_opp_supply *supplies;
+
unsigned long clock_latency_ns;
struct opp_table *opp_table;
@@ -144,7 +139,10 @@ enum opp_table_access {
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
* @clk: Device's clock handle
- * @regulator: Supply regulator
+ * @regulators: Supply regulators
+ * @regulator_count: Number of power supply regulators
+ * @set_opp: Platform specific set_opp callback
+ * @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -179,7 +177,11 @@ struct opp_table {
unsigned int supported_hw_count;
const char *prop_name;
struct clk *clk;
- struct regulator *regulator;
+ struct regulator **regulators;
+ unsigned int regulator_count;
+
+ int (*set_opp)(struct dev_pm_set_opp_data *data);
+ struct dev_pm_set_opp_data *set_opp_data;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
@@ -190,7 +192,6 @@ struct opp_table {
/* Routines internal to opp core */
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-struct device_node *_of_get_opp_desc_node(struct device *dev);
void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 50e30e7b059d..a46e97e515c5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
+ WAKE_IRQ_DEDICATED_MANAGED)
+
struct wake_irq {
struct device *dev;
+ unsigned int status;
int irq;
- bool dedicated_irq:1;
};
extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
@@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
}
+static inline void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+}
+
+static inline void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+}
+
#endif
#ifdef CONFIG_PM_SLEEP
@@ -127,6 +144,11 @@ extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
extern void device_pm_check_callbacks(struct device *dev);
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return dev->power.in_dpm_list;
+}
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -146,6 +168,11 @@ static inline void device_pm_move_last(struct device *dev) {}
static inline void device_pm_check_callbacks(struct device *dev) {}
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return device_is_registered(dev);
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7f3646e459cb..58fcc758334e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -856,7 +856,10 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
struct dev_pm_qos_request *req;
if (val < 0) {
- ret = -EINVAL;
+ if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+ ret = 0;
+ else
+ ret = -EINVAL;
goto out;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -883,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
/**
* dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
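
The qos.c change above (together with the sysfs.c hunk further down) tightens the latency-tolerance interface: raw negative numbers written from userspace are rejected, while the special "auto" value (PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) no longer produces -EINVAL, and the update helper is now exported. As a hedged sketch, a driver opting in to the knob might look like this; the hardware hook is an invented placeholder.

#include <linux/device.h>
#include <linux/pm_qos.h>

static void my_set_latency_tolerance(struct device *dev, s32 val)
{
	/* Program the hardware latency-tolerance (e.g. LTR) setting here */
}

static int my_expose_latency_tolerance(struct device *dev)
{
	dev->power.set_latency_tolerance = my_set_latency_tolerance;

	/* Creates pm_qos_latency_tolerance_us in sysfs for this device */
	return dev_pm_qos_expose_latency_tolerance(dev);
}
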
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 82a081ea4317..872eac4cb1df 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -12,6 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
+
+#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
@@ -241,7 +243,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
retval = -EACCES;
else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN;
- else if (!pm_children_suspended(dev))
+ else if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count))
retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */
@@ -258,6 +261,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
+static int rpm_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ int retval;
+
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+ link->rpm_active)
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+ if (retval < 0) {
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+ link->rpm_active = true;
+ }
+ return 0;
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->rpm_active &&
+ READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+ pm_runtime_put(link->supplier);
+ link->rpm_active = false;
+ }
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -266,19 +305,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval;
+ int retval, idx;
+ bool use_links = dev->power.links_count > 0;
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
- else
+ } else {
spin_unlock_irq(&dev->power.lock);
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+ if (retval)
+ goto fail;
+
+ device_links_read_unlock(idx);
+ }
+ }
+
retval = cb(dev);
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- else
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+ fail:
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
spin_lock_irq(&dev->power.lock);
+ }
return retval;
}
@@ -515,7 +592,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- dev_pm_enable_wake_irq(dev);
+ dev_pm_enable_wake_irq_check(dev, true);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
@@ -554,7 +631,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
- dev_pm_disable_wake_irq(dev);
+ dev_pm_disable_wake_irq_check(dev);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -712,8 +789,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&parent->power.lock);
/*
- * We can resume if the parent's runtime PM is disabled or it
- * is set to ignore children.
+		 * Resume the parent if it has runtime PM enabled and has not
+		 * been set to ignore its children.
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children) {
@@ -737,12 +814,12 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
- dev_pm_disable_wake_irq(dev);
+ dev_pm_disable_wake_irq_check(dev);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
- dev_pm_enable_wake_irq(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
@@ -1027,7 +1104,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
goto out_set;
if (status == RPM_SUSPENDED) {
- /* It always is possible to set the status to 'suspended'. */
+ /*
+ * It is invalid to suspend a device with an active child,
+ * unless it has been set to ignore its children.
+ */
+ if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count)) {
+ dev_err(dev, "runtime PM trying to suspend device but active child\n");
+ error = -EBUSY;
+ goto out;
+ }
+
if (parent) {
atomic_add_unless(&parent->power.child_count, -1, 0);
notify_parent = !parent->power.ignore_children;
@@ -1447,6 +1534,94 @@ void pm_runtime_remove(struct device *dev)
}
/**
+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+ * @dev: Device whose driver is going to be removed.
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+ * (once per link).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+ * Since the device is guaranteed to be runtime-active at the point this is
+ * called, nothing else needs to be done here.
+ *
+ * Moreover, this is called after device_links_busy() has returned 'false', so
+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+ * therefore rpm_active can't be manipulated concurrently.
+ */
+void pm_runtime_clean_up_links(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->rpm_active) {
+ pm_runtime_put_noidle(dev);
+ link->rpm_active = false;
+ }
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_put(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.links_count++;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_drop_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
@@ -1478,6 +1653,16 @@ int pm_runtime_force_suspend(struct device *dev)
if (ret)
goto err;
+ /*
+	 * Increase the runtime PM usage count for the device's parent, in case
+	 * the device is found to be in use when system suspend is invoked.
+ * This informs pm_runtime_force_resume() to resume the parent
+ * immediately, which is needed to be able to resume its children,
+ * when not deferring the resume to be managed via runtime PM.
+ */
+ if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
+ pm_runtime_get_noresume(dev->parent);
+
pm_runtime_set_suspended(dev);
return 0;
err:
@@ -1487,16 +1672,20 @@ err:
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
/**
- * pm_runtime_force_resume - Force a device into resume state.
+ * pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
* Prior invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power. We update the runtime PM
- * status and re-enables runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. To distinguish that, we check whether the runtime PM
+ * usage count is greater than 1 (the PM core increases the usage count in the
+ * system PM prepare phase), as that indicates a real user (such as a subsystem,
+ * driver, userspace, etc.) is using it. If that is the case, the device is
+ * expected to be used on system resume as well, so then we resume it. In the
+ * other case, we defer the resume to be managed via runtime PM.
*
- * Typically this function may be invoked from a system resume callback to make
- * sure the device is put into full power state.
+ * Typically this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
@@ -1513,6 +1702,17 @@ int pm_runtime_force_resume(struct device *dev)
if (!pm_runtime_status_suspended(dev))
goto out;
+ /*
+ * Decrease the parent's runtime PM usage count, if we increased it
+ * during system suspend in pm_runtime_force_suspend().
+ */
+ if (atomic_read(&dev->power.usage_count) > 1) {
+ if (dev->parent)
+ pm_runtime_put_noidle(dev->parent);
+ } else {
+ goto out;
+ }
+
ret = pm_runtime_set_active(dev);
if (ret)
goto out;
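
To show the intended use of this pair, here is a hedged driver-side sketch (not from this patch) that reuses a driver's runtime PM callbacks for system sleep via pm_runtime_force_suspend()/pm_runtime_force_resume(); the "foo" driver and its callbacks are hypothetical:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical runtime PM callbacks of a "foo" driver. */
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	/* Reuse the runtime PM path for system suspend/resume. */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
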
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b46798c81d..33b4b902741a 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
s32 value;
int ret;
- if (kstrtos32(buf, 0, &value)) {
+ if (kstrtos32(buf, 0, &value) == 0) {
+ /* Users can't write negative values directly */
+ if (value < 0)
+ return -EINVAL;
+ } else {
if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index efec10b49d59..1cda505d6a85 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -10,6 +10,7 @@
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
+#include <linux/suspend.h>
#include <linux/mc146818rtc.h>
@@ -74,6 +75,9 @@
#define DEVSEED (7919)
+bool pm_trace_rtc_abused __read_mostly;
+EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
+
static unsigned int dev_hash_value;
static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
@@ -104,6 +108,7 @@ static int set_magic_time(unsigned int user, unsigned int file, unsigned int dev
time.tm_min = (n % 20) * 3;
n /= 20;
mc146818_set_time(&time);
+ pm_trace_rtc_abused = true;
return n ? -1 : 0;
}
@@ -239,9 +244,31 @@ int show_trace_dev_match(char *buf, size_t size)
return ret;
}
+static int
+pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ if (pm_trace_rtc_abused) {
+ pm_trace_rtc_abused = false;
+ pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block pm_trace_nb = {
+ .notifier_call = pm_trace_notify,
+};
+
static int early_resume_init(void)
{
hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
return 0;
}
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0d77cd6fd8d1..404d94c6c8bc 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
- if (wirq->dedicated_irq)
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
free_irq(wirq->irq, wirq);
+ wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+ }
kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- wirq->dedicated_irq = true;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
/*
@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+
return err;
err_free_irq:
@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
*
- * Called from the bus code or the device driver for
- * runtime_suspend() to enable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the wake-up interrupt handling that is
+ * otherwise managed by the runtime PM core, and enable the wake-up interrupt.
*
* Note that for runtime_suspend() the wake-up interrupts
* should be unconditionally enabled unlike for suspend()
@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (wirq && wirq->dedicated_irq)
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
@@ -231,20 +234,73 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
* dev_pm_disable_wake_irq - Disable device wake-up interrupt
* @dev: Device
*
- * Called from the bus code or the device driver for
- * runtime_resume() to disable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the wake-up interrupt handling that is
+ * otherwise managed by the runtime PM core, and disable the wake-up interrupt.
*/
void dev_pm_disable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (wirq && wirq->dedicated_irq)
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
/**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables the wakeirq conditionally. We need to enable the wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
+ * starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should only be called from the rpm_suspend() and rpm_resume() paths.
+ * The caller must hold &dev->power.lock to change wirq->status.
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ return;
+
+ if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+ goto enable;
+ } else if (can_change_status) {
+ wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+ goto enable;
+ }
+
+ return;
+
+enable:
+ enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ *
+ * Disables the wake-up interrupt conditionally based on its status.
+ * Should only be called from the rpm_suspend() and rpm_resume() paths.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+ disable_irq_nosync(wirq->irq);
+}
+
+/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
*
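
As a hedged usage sketch (not part of this diff), a driver typically hands its dedicated wake-up interrupt over once in probe and then lets the runtime PM core arm and disarm it through the *_check() helpers above; the "foo" names and the IRQ index are placeholders:

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 1);	/* assumed dedicated wake-up line */

	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);
	/* From here on, rpm_suspend()/rpm_resume() manage this IRQ. */
	return dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
}
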
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 62e4de2aa8d1..f546f8f107b0 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -811,7 +811,7 @@ void pm_print_active_wakeup_sources(void)
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_info("active wakeup source: %s\n", ws->name);
+ pr_debug("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -822,7 +822,7 @@ void pm_print_active_wakeup_sources(void)
}
if (!active && last_activity_ws)
- pr_info("last active wakeup source: %s\n",
+ pr_debug("last active wakeup source: %s\n",
last_activity_ws->name);
rcu_read_unlock();
}
@@ -905,7 +905,7 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
split_counters(&cnt, &inpr);
if (inpr == 0 || signal_pending(current))
break;
-
+ pm_print_active_wakeup_sources();
schedule();
}
finish_wait(&wakeup_count_wait_queue, &wait);
@@ -998,14 +998,14 @@ static int print_wakeup_source_stats(struct seq_file *m,
active_time = ktime_sub(now, ws->last_time);
total_time = ktime_add(total_time, active_time);
- if (active_time.tv64 > max_time.tv64)
+ if (active_time > max_time)
max_time = active_time;
if (ws->autosleep_enabled)
prevent_sleep_time = ktime_add(prevent_sleep_time,
ktime_sub(now, ws->start_prevent_time));
} else {
- active_time = ktime_set(0, 0);
+ active_time = 0;
}
seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 6f77d7319fc6..4ff311374c4a 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -236,15 +236,13 @@ static int regcache_lzo_read(struct regmap *map,
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
@@ -275,15 +273,13 @@ static int regcache_lzo_write(struct regmap *map,
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index b63f23e6ad61..dc26e5949a32 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
+#include <linux/glob.h>
static DEFINE_IDA(soc_ida);
@@ -113,6 +114,12 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
struct soc_device *soc_dev;
int ret;
+ if (!soc_bus_type.p) {
+ ret = bus_register(&soc_bus_type);
+ if (ret)
+ goto out1;
+ }
+
soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
if (!soc_dev) {
ret = -ENOMEM;
@@ -156,6 +163,78 @@ void soc_device_unregister(struct soc_device *soc_dev)
static int __init soc_bus_register(void)
{
+ if (soc_bus_type.p)
+ return 0;
+
return bus_register(&soc_bus_type);
}
core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ const struct soc_device_attribute *match = arg;
+
+ if (match->machine &&
+ (!soc_dev->attr->machine ||
+ !glob_match(match->machine, soc_dev->attr->machine)))
+ return 0;
+
+ if (match->family &&
+ (!soc_dev->attr->family ||
+ !glob_match(match->family, soc_dev->attr->family)))
+ return 0;
+
+ if (match->revision &&
+ (!soc_dev->attr->revision ||
+ !glob_match(match->revision, soc_dev->attr->revision)))
+ return 0;
+
+ if (match->soc_id &&
+ (!soc_dev->attr->soc_id ||
+ !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches)
+{
+ int ret = 0;
+
+ if (!matches)
+ return NULL;
+
+ while (!ret) {
+ if (!(matches->machine || matches->family ||
+ matches->revision || matches->soc_id))
+ break;
+ ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+ soc_device_match_one);
+ if (!ret)
+ matches++;
+ else
+ return matches;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
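
A hedged usage sketch (not part of this diff) of the new helper: a driver keys a quirk off the running SoC using glob patterns, which is what the GLOB select in Kconfig enables; the attribute strings and "foo" names below are made up for illustration:

#include <linux/printk.h>
#include <linux/sys_soc.h>

/* Hypothetical table; any field left NULL is ignored by the match. */
static const struct soc_device_attribute foo_quirk_socs[] = {
	{ .family = "Example SoC", .revision = "1.*" },
	{ /* sentinel */ }
};

static void foo_apply_quirks(void)
{
	if (soc_device_match(foo_quirk_socs))
		pr_info("applying Example SoC rev 1.x workaround\n");
}
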
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 000000000000..9aa0d45a60db
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,9 @@
+config TEST_ASYNC_DRIVER_PROBE
+ tristate "Build kernel module to test asynchronous driver probing"
+ depends on m
+ help
+ Enabling this option produces a kernel module that allows
+ testing asynchronous driver probing by the device core.
+ The module name will be test_async_driver_probe.ko
+
+ If unsure say N.
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 000000000000..90477c5fd9f9
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 000000000000..304d5c2bd5e9
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
+#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
+
+static int test_probe(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
+ TEST_PROBE_DELAY);
+ msleep(TEST_PROBE_DELAY);
+ dev_info(&pdev->dev, "done sleeping\n");
+
+ return 0;
+}
+
+static struct platform_driver async_driver = {
+ .driver = {
+ .name = "test_async_driver",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+ .driver = {
+ .name = "test_sync_driver",
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_device *async_dev_1, *async_dev_2;
+static struct platform_device *sync_dev_1;
+
+static int __init test_async_probe_init(void)
+{
+ ktime_t calltime, delta;
+ unsigned long long duration;
+ int error;
+
+ pr_info("registering first asynchronous device...\n");
+
+ async_dev_1 = platform_device_register_simple("test_async_driver", 1,
+ NULL, 0);
+ if (IS_ERR(async_dev_1)) {
+ error = PTR_ERR(async_dev_1);
+ pr_err("failed to create async_dev_1: %d", error);
+ return error;
+ }
+
+ pr_info("registering asynchronous driver...\n");
+ calltime = ktime_get();
+ error = platform_driver_register(&async_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_1;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_driver;
+ }
+
+ pr_info("registering second asynchronous device...\n");
+ calltime = ktime_get();
+ async_dev_2 = platform_device_register_simple("test_async_driver", 2,
+ NULL, 0);
+ if (IS_ERR(async_dev_2)) {
+ error = PTR_ERR(async_dev_2);
+ pr_err("failed to create async_dev_2: %d", error);
+ goto err_unregister_async_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous driver...\n");
+
+ error = platform_driver_register(&sync_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous device...\n");
+ calltime = ktime_get();
+ sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
+ NULL, 0);
+ if (IS_ERR(sync_dev_1)) {
+ error = PTR_ERR(sync_dev_1);
+ pr_err("failed to create sync_dev_1: %d", error);
+ goto err_unregister_sync_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe was too quick\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_sync_dev_1;
+ }
+
+ pr_info("completed successfully");
+
+ return 0;
+
+err_unregister_sync_dev_1:
+ platform_device_unregister(sync_dev_1);
+
+err_unregister_sync_driver:
+ platform_driver_unregister(&sync_driver);
+
+err_unregister_async_dev_2:
+ platform_device_unregister(async_dev_2);
+
+err_unregister_async_driver:
+ platform_driver_unregister(&async_driver);
+
+err_unregister_async_dev_1:
+ platform_device_unregister(async_dev_1);
+
+ return error;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+ platform_driver_unregister(&async_driver);
+ platform_driver_unregister(&sync_driver);
+ platform_device_unregister(async_dev_1);
+ platform_device_unregister(async_dev_2);
+ platform_device_unregister(sync_dev_1);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index df3c97cb4c99..d6ec1c546f5b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -118,51 +118,19 @@ static int topology_add_dev(unsigned int cpu)
return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
-static void topology_remove_dev(unsigned int cpu)
+static int topology_remove_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
sysfs_remove_group(&dev->kobj, &topology_attr_group);
-}
-
-static int topology_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rc = topology_add_dev(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- topology_remove_dev(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ return 0;
}
static int topology_sysfs_init(void)
{
- int cpu;
- int rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = topology_add_dev(cpu);
- if (rc)
- goto out;
- }
- __hotcpu_notifier(topology_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
+ "base/topology:prepare", topology_add_dev,
+ topology_remove_dev);
}
device_initcall(topology_sysfs_init);
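
The conversion above illustrates the general pattern: cpuhp_setup_state() takes a startup and a teardown callback that both receive the CPU number and return 0 or a negative errno, which is why topology_remove_dev() now returns int. A hedged, generic sketch follows, using the dynamic CPUHP_BP_PREPARE_DYN slot rather than the fixed state added by this patch; the "foo" names are hypothetical:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int foo_prepare_cpu(unsigned int cpu)
{
	/* Allocate per-CPU state or create sysfs entries here. */
	return 0;
}

static int foo_dead_cpu(unsigned int cpu)
{
	/* Undo whatever foo_prepare_cpu() set up. */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/* With a dynamic state, a positive return value is the allocated slot. */
	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "base/foo:prepare",
				foo_prepare_cpu, foo_dead_cpu);
	return ret < 0 ? ret : 0;
}
device_initcall(foo_init);
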