Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/domain.c           217
-rw-r--r--  drivers/base/power/domain_governor.c   71
2 files changed, 202 insertions, 86 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e6414e9998bb..679c79545e42 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -124,6 +124,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
+#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -237,6 +238,95 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
+/**
+ * dev_pm_genpd_set_performance_state - Set performance state of device's power
+ * domain.
+ *
+ * @dev: Device for which the performance-state needs to be set.
+ * @state: Target performance state of the device. Set this to 0 when the
+ * device no longer has any performance-state constraints (the device then
+ * stops participating in the selection of the genpd's target performance
+ * state).
+ *
+ * It is assumed that the callers guarantee that the genpd is not detached
+ * while this routine is running.
+ *
+ * Returns 0 on success and a negative error value on failure.
+ */
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data, *pd_data;
+ struct pm_domain_data *pdd;
+ unsigned int prev;
+ int ret = 0;
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -ENODEV;
+
+ if (unlikely(!genpd->set_performance_state))
+ return -EINVAL;
+
+ if (unlikely(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ genpd_lock(genpd);
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ prev = gpd_data->performance_state;
+ gpd_data->performance_state = state;
+
+ /* New requested state is the same as the max requested state */
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /* New requested state is higher than the max requested state */
+ if (state > genpd->performance_state)
+ goto update_state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /*
+ * We aren't propagating performance state changes of a subdomain to its
+ * masters as we don't have hardware that needs it. Moreover, the
+ * performance states of a subdomain and its masters may not have a
+ * one-to-one mapping and would require additional information. We can
+ * revisit this once we have hardware that needs it. For that reason,
+ * we don't need to consider the performance state of the subdomains
+ * of genpd here.
+ */
+
+update_state:
+ if (genpd_status_on(genpd)) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret) {
+ gpd_data->performance_state = prev;
+ goto unlock;
+ }
+ }
+
+ genpd->performance_state = state;
+
+unlock:
+ genpd_unlock(genpd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
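For reference, a minimal sketch of how a consumer driver might use the new
dev_pm_genpd_set_performance_state() API added in the hunk above. The driver
name, the state value 3 and the busy/idle policy are illustrative assumptions
and not part of this patch; the device is assumed to already be attached to a
genpd that implements ->set_performance_state().

	#include <linux/device.h>
	#include <linux/pm_domain.h>

	/* Hypothetical driver raising and dropping its performance-state vote. */
	static int foo_set_perf_vote(struct device *dev, bool busy)
	{
		/* 3 is an assumed, SoC-specific performance state index. */
		unsigned int state = busy ? 3 : 0;
		int ret;

		ret = dev_pm_genpd_set_performance_state(dev, state);
		if (ret)
			dev_err(dev, "failed to set performance state %u: %d\n",
				state, ret);

		return ret;
	}

Passing 0 drops the device's vote, so it no longer contributes to the domain's
aggregated (maximum) performance state.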
@@ -256,6 +346,15 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+
+ if (unlikely(genpd->set_performance_state)) {
+ ret = genpd->set_performance_state(genpd, genpd->performance_state);
+ if (ret) {
+ pr_warn("%s: Failed to set performance state %d (%d)\n",
+ genpd->name, genpd->performance_state, ret);
+ }
+ }
+
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -747,11 +846,7 @@ late_initcall(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-/**
- * pm_genpd_present - Check if the given PM domain has been initialized.
- * @genpd: PM domain to check.
- */
-static bool pm_genpd_present(const struct generic_pm_domain *genpd)
+static bool genpd_present(const struct generic_pm_domain *genpd)
{
const struct generic_pm_domain *gpd;
@@ -769,12 +864,6 @@ static bool pm_genpd_present(const struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_SLEEP
-static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
-}
-
/**
* genpd_sync_power_off - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -861,7 +950,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
* @genpd: PM domain the device belongs to.
*
* There are two cases in which a device that can wake up the system from sleep
- * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * states should be resumed by genpd_prepare(): (1) if the device is enabled
* to wake up the system and it has to remain active for this purpose while the
* system is in the sleep state and (2) if the device is not enabled to wake up
* the system from sleep states and it generally doesn't generate wakeup signals
@@ -879,12 +968,12 @@ static bool resume_needed(struct device *dev,
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd_dev_active_wakeup(genpd, dev);
+ active_wakeup = genpd_is_active_wakeup(genpd);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
/**
- * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
* Start a power transition of a device (during a system-wide power transition)
@@ -892,7 +981,7 @@ static bool resume_needed(struct device *dev,
* an object of type struct generic_pm_domain representing a PM domain
* consisting of I/O devices.
*/
-static int pm_genpd_prepare(struct device *dev)
+static int genpd_prepare(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
@@ -948,7 +1037,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
if (poweroff)
@@ -973,13 +1062,13 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
}
/**
- * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int genpd_suspend_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -987,12 +1076,12 @@ static int pm_genpd_suspend_noirq(struct device *dev)
}
/**
- * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Restore power to the device's PM domain, if necessary, and start the device.
*/
-static int pm_genpd_resume_noirq(struct device *dev)
+static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1003,7 +1092,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
genpd_lock(genpd);
@@ -1022,7 +1111,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
}
/**
- * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+ * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
@@ -1030,7 +1119,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
-static int pm_genpd_freeze_noirq(struct device *dev)
+static int genpd_freeze_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1052,13 +1141,13 @@ static int pm_genpd_freeze_noirq(struct device *dev)
}
/**
- * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Start the device, unless power has been removed from the domain already
* before the system transition.
*/
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int genpd_thaw_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1079,14 +1168,14 @@ static int pm_genpd_thaw_noirq(struct device *dev)
}
/**
- * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
+ * genpd_poweroff_noirq - Completion of hibernation of device in an
* I/O PM domain.
* @dev: Device to poweroff.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_poweroff_noirq(struct device *dev)
+static int genpd_poweroff_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -1094,13 +1183,13 @@ static int pm_genpd_poweroff_noirq(struct device *dev)
}
/**
- * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+ * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
* Make sure the domain will be in the same power state as before the
* hibernation the system is resuming from and start the device if necessary.
*/
-static int pm_genpd_restore_noirq(struct device *dev)
+static int genpd_restore_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1137,7 +1226,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
}
/**
- * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * genpd_complete - Complete power transition of a device in a power domain.
* @dev: Device to complete the transition of.
*
* Complete a power transition of a device (during a system-wide power
@@ -1145,7 +1234,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
* domain member of an object of type struct generic_pm_domain representing
* a power domain consisting of I/O devices.
*/
-static void pm_genpd_complete(struct device *dev)
+static void genpd_complete(struct device *dev)
{
struct generic_pm_domain *genpd;
@@ -1178,7 +1267,7 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
struct generic_pm_domain *genpd;
genpd = dev_to_genpd(dev);
- if (!pm_genpd_present(genpd))
+ if (!genpd_present(genpd))
return;
if (suspend) {
@@ -1204,14 +1293,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else /* !CONFIG_PM_SLEEP */
-#define pm_genpd_prepare NULL
-#define pm_genpd_suspend_noirq NULL
-#define pm_genpd_resume_noirq NULL
-#define pm_genpd_freeze_noirq NULL
-#define pm_genpd_thaw_noirq NULL
-#define pm_genpd_poweroff_noirq NULL
-#define pm_genpd_restore_noirq NULL
-#define pm_genpd_complete NULL
+#define genpd_prepare NULL
+#define genpd_suspend_noirq NULL
+#define genpd_resume_noirq NULL
+#define genpd_freeze_noirq NULL
+#define genpd_thaw_noirq NULL
+#define genpd_poweroff_noirq NULL
+#define genpd_restore_noirq NULL
+#define genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
@@ -1237,7 +1326,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = -1;
+ gpd_data->td.effective_constraint_ns = 0;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
spin_lock_irq(&dev->power.lock);
@@ -1572,14 +1661,14 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
- genpd->domain.ops.prepare = pm_genpd_prepare;
- genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
- genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
- genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
- genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
- genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
- genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->domain.ops.prepare = genpd_prepare;
+ genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = genpd_resume_noirq;
+ genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
+ genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = genpd_restore_noirq;
+ genpd->domain.ops.complete = genpd_complete;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -1793,7 +1882,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
mutex_lock(&gpd_list_lock);
- if (pm_genpd_present(genpd)) {
+ if (genpd_present(genpd)) {
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (!ret) {
genpd->provider = &np->fwnode;
@@ -1829,7 +1918,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
for (i = 0; i < data->num_domains; i++) {
if (!data->domains[i])
continue;
- if (!pm_genpd_present(data->domains[i]))
+ if (!genpd_present(data->domains[i]))
goto error;
data->domains[i]->provider = &np->fwnode;
@@ -2272,7 +2361,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
-static struct dentry *pm_genpd_debugfs_dir;
+static struct dentry *genpd_debugfs_dir;
/*
* TODO: This function is a slightly modified version of rtpm_status_show
@@ -2300,8 +2389,8 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
seq_puts(s, p);
}
-static int pm_genpd_summary_one(struct seq_file *s,
- struct generic_pm_domain *genpd)
+static int genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *genpd)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2371,7 +2460,7 @@ static int genpd_summary_show(struct seq_file *s, void *data)
return -ERESTARTSYS;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- ret = pm_genpd_summary_one(s, genpd);
+ ret = genpd_summary_one(s, genpd);
if (ret)
break;
}
@@ -2557,23 +2646,23 @@ define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);
-static int __init pm_genpd_debug_init(void)
+static int __init genpd_debug_init(void)
{
struct dentry *d;
struct generic_pm_domain *genpd;
- pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+ genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
- if (!pm_genpd_debugfs_dir)
+ if (!genpd_debugfs_dir)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
+ genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
+ d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
if (!d)
return -ENOMEM;
@@ -2593,11 +2682,11 @@ static int __init pm_genpd_debug_init(void)
return 0;
}
-late_initcall(pm_genpd_debug_init);
+late_initcall(genpd_debug_init);
-static void __exit pm_genpd_debug_exit(void)
+static void __exit genpd_debug_exit(void)
{
- debugfs_remove_recursive(pm_genpd_debugfs_dir);
+ debugfs_remove_recursive(genpd_debugfs_dir);
}
-__exitcall(pm_genpd_debug_exit);
+__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 281f949c5ffe..e4cca8adab32 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,22 +14,33 @@
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
- s32 constraint_ns = -1;
+ s64 constraint_ns;
- if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+ /*
+ * Only take suspend-time QoS constraints of devices into
+ * account, because constraints updated after the device has
+ * been suspended are not guaranteed to be taken into account
+ * anyway. In order for them to take effect, the device has to
+ * be resumed and suspended again.
+ */
constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
-
- if (constraint_ns < 0) {
+ } else {
+ /*
+ * The child is not in a domain and there's no info on its
+ * suspend/resume latencies, so assume them to be negligible and
+ * take its current PM QoS constraint (that's the only thing
+ * known at this point anyway).
+ */
constraint_ns = dev_pm_qos_read_value(dev);
- constraint_ns *= NSEC_PER_USEC;
+ if (constraint_ns > 0)
+ constraint_ns *= NSEC_PER_USEC;
}
+
+ /* 0 means "no constraint" */
if (constraint_ns == 0)
return 0;
- /*
- * constraint_ns cannot be negative here, because the device has been
- * suspended.
- */
if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
*constraint_ns_p = constraint_ns;
@@ -76,14 +87,32 @@ static bool default_suspend_ok(struct device *dev)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
- if (constraint_ns > 0) {
+ if (constraint_ns == 0) {
+ /* "No restriction", so the device is allowed to suspend. */
+ td->effective_constraint_ns = 0;
+ td->cached_suspend_ok = true;
+ } else if (constraint_ns < 0) {
+ /*
+ * This triggers if one of the children that don't belong to a
+ * domain has a negative PM QoS constraint and it's better not
+ * to suspend then. effective_constraint_ns is negative already
+ * and cached_suspend_ok is false, so bail out.
+ */
+ return false;
+ } else {
constraint_ns -= td->suspend_latency_ns +
td->resume_latency_ns;
- if (constraint_ns == 0)
+ /*
+ * effective_constraint_ns is negative already and
+ * cached_suspend_ok is false, so if the computed value is not
+ * positive, return right away.
+ */
+ if (constraint_ns <= 0)
return false;
+
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
}
- td->effective_constraint_ns = constraint_ns;
- td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
@@ -144,18 +173,16 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*/
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
- /* default_suspend_ok() need not be called before us. */
- if (constraint_ns < 0) {
- constraint_ns = dev_pm_qos_read_value(pdd->dev);
- constraint_ns *= NSEC_PER_USEC;
- }
+ /*
+ * Negative values mean "no suspend at all" and this runs only
+ * when all devices in the domain are suspended, so it must be
+ * 0 at least.
+ *
+ * 0 means "no constraint"
+ */
if (constraint_ns == 0)
continue;
- /*
- * constraint_ns cannot be negative here, because the device has
- * been suspended.
- */
if (constraint_ns <= off_on_time_ns)
return false;
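To make the sign conventions used by the reworked governor concrete: an
effective_constraint_ns of 0 now means "no constraint", a negative value means
"never suspend/power off", and a positive value is a latency budget in
nanoseconds that must cover the device's suspend/resume latencies and, for the
domain, its off/on time. The following self-contained, userspace-style C model
of that arithmetic is only an illustration with made-up numbers; the function
names and values below are not part of the kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified model: 0 = no constraint, <0 = never suspend, >0 = budget in ns. */
	static bool suspend_ok(long long constraint_ns,
			       long long suspend_lat_ns, long long resume_lat_ns)
	{
		if (constraint_ns == 0)
			return true;	/* "no restriction" */
		if (constraint_ns < 0)
			return false;	/* "no suspend at all" */
		/* The budget must stay positive after the device latencies. */
		return constraint_ns - suspend_lat_ns - resume_lat_ns > 0;
	}

	static bool power_down_ok(long long effective_constraint_ns,
				  long long off_on_time_ns)
	{
		if (effective_constraint_ns == 0)
			return true;	/* no constraint */
		/* Powering the domain off is only worthwhile if the budget covers it. */
		return effective_constraint_ns > off_on_time_ns;
	}

	int main(void)
	{
		/* e.g. a 2 ms QoS budget, 300 us + 500 us device latencies */
		printf("suspend_ok: %d\n",
		       suspend_ok(2000000, 300000, 500000));	/* 1 */
		/* the 1.2 ms left over does not cover a 1.5 ms domain off/on time */
		printf("power_down_ok: %d\n",
		       power_down_ok(1200000, 1500000));	/* 0 */
		return 0;
	}

In the patch itself, default_suspend_ok() caches the leftover budget in
effective_constraint_ns and __default_power_down_ok() then compares it against
the domain state's power-off plus power-on latency, roughly as modeled here.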