Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/domain.c	| 272
-rw-r--r--	drivers/base/power/runtime.c	|  63
2 files changed, 209 insertions, 126 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 10a61d6147d0..500de1dee967 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
 #endif
 
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
+					   unsigned int state)
+{
+	struct generic_pm_domain_data *pd_data;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
+
+	/* New requested state is same as Max requested state */
+	if (state == genpd->performance_state)
+		return state;
+
+	/* New requested state is higher than Max requested state */
+	if (state > genpd->performance_state)
+		return state;
+
+	/* Traverse all devices within the domain */
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		pd_data = to_gpd_data(pdd);
+
+		if (pd_data->performance_state > state)
+			state = pd_data->performance_state;
+	}
+
+	/*
+	 * Traverse all sub-domains within the domain. This can be
+	 * done without any additional locking as the link->performance_state
+	 * field is protected by the master genpd->lock, which is already taken.
+	 *
+	 * Also note that link->performance_state (subdomain's performance state
+	 * requirement to master domain) is different from
+	 * link->slave->performance_state (current performance state requirement
+	 * of the devices/sub-domains of the subdomain) and so can have a
+	 * different value.
+	 *
+	 * Note that we also take vote from powered-off sub-domains into account
+	 * as the same is done for devices right now.
+	 */
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (link->performance_state > state)
+			state = link->performance_state;
+	}
+
+	return state;
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+					unsigned int state, int depth)
+{
+	struct generic_pm_domain *master;
+	struct gpd_link *link;
+	int master_state, ret;
+
+	if (state == genpd->performance_state)
+		return 0;
+
+	/* Propagate to masters of genpd */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		master = link->master;
+
+		if (!master->set_performance_state)
+			continue;
+
+		/* Find master's performance state */
+		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
+							 master->opp_table,
+							 state);
+		if (unlikely(ret < 0))
+			goto err;
+
+		master_state = ret;
+
+		genpd_lock_nested(master, depth + 1);
+
+		link->prev_performance_state = link->performance_state;
+		link->performance_state = master_state;
+		master_state = _genpd_reeval_performance_state(master,
+							       master_state);
+		ret = _genpd_set_performance_state(master, master_state, depth + 1);
+		if (ret)
+			link->performance_state = link->prev_performance_state;
+
+		genpd_unlock(master);
+
+		if (ret)
+			goto err;
+	}
+
+	ret = genpd->set_performance_state(genpd, state);
+	if (ret)
+		goto err;
+
+	genpd->performance_state = state;
+	return 0;
+
+err:
+	/* Encountered an error, lets rollback */
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
+					     slave_node) {
+		master = link->master;
+
+		if (!master->set_performance_state)
+			continue;
+
+		genpd_lock_nested(master, depth + 1);
+
+		master_state = link->prev_performance_state;
+		link->performance_state = master_state;
+
+		master_state = _genpd_reeval_performance_state(master,
+							       master_state);
+		if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+			pr_err("%s: Failed to roll back to %d performance state\n",
+			       master->name, master_state);
+		}
+
+		genpd_unlock(master);
+	}
+
+	return ret;
+}
+
 /**
  * dev_pm_genpd_set_performance_state- Set performance state of device's power
  * domain.
@@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 {
 	struct generic_pm_domain *genpd;
-	struct generic_pm_domain_data *gpd_data, *pd_data;
-	struct pm_domain_data *pdd;
+	struct generic_pm_domain_data *gpd_data;
 	unsigned int prev;
-	int ret = 0;
+	int ret;
 
 	genpd = dev_to_genpd(dev);
 	if (IS_ERR(genpd))
@@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 	prev = gpd_data->performance_state;
 	gpd_data->performance_state = state;
 
-	/* New requested state is same as Max requested state */
-	if (state == genpd->performance_state)
-		goto unlock;
-
-	/* New requested state is higher than Max requested state */
-	if (state > genpd->performance_state)
-		goto update_state;
-
-	/* Traverse all devices within the domain */
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		pd_data = to_gpd_data(pdd);
-
-		if (pd_data->performance_state > state)
-			state = pd_data->performance_state;
-	}
-
-	if (state == genpd->performance_state)
-		goto unlock;
-
-	/*
-	 * We aren't propagating performance state changes of a subdomain to its
-	 * masters as we don't have hardware that needs it. Over that, the
-	 * performance states of subdomain and its masters may not have
-	 * one-to-one mapping and would require additional information. We can
-	 * get back to this once we have hardware that needs it. For that
-	 * reason, we don't have to consider performance state of the subdomains
-	 * of genpd here.
-	 */
-
-update_state:
-	if (genpd_status_on(genpd)) {
-		ret = genpd->set_performance_state(genpd, state);
-		if (ret) {
-			gpd_data->performance_state = prev;
-			goto unlock;
-		}
-	}
-
-	genpd->performance_state = state;
+	state = _genpd_reeval_performance_state(genpd, state);
+	ret = _genpd_set_performance_state(genpd, state, 0);
+	if (ret)
+		gpd_data->performance_state = prev;
 
-unlock:
 	genpd_unlock(genpd);
 
 	return ret;
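The hunks above turn a device's performance-state request into an aggregated, propagated vote: _genpd_reeval_performance_state() picks the maximum across all devices and subdomains, and _genpd_set_performance_state() forwards the translated result to every master domain, rolling all masters back to their previous votes on failure. A minimal consumer-side sketch of the resulting contract follows; the foo_* functions are hypothetical, only dev_pm_genpd_set_performance_state() is the real entry point:

#include <linux/pm_domain.h>

/*
 * Hypothetical consumer: each device attached to the domain casts an
 * independent vote; the genpd core keeps the maximum of all votes and,
 * with this change, forwards a translated vote to every master domain.
 */
static int foo_request_high_perf(struct device *dev)
{
	/*
	 * Vote for performance state 5. If any master rejects the
	 * translated vote, the core rolls the masters back and this
	 * call returns the error unchanged.
	 */
	return dev_pm_genpd_set_performance_state(dev, 5);
}

static void foo_drop_vote(struct device *dev)
{
	/*
	 * A zero vote does not force the domain to 0: the aggregated
	 * state only drops as far as the highest remaining vote from
	 * other devices and subdomains.
	 */
	dev_pm_genpd_set_performance_state(dev, 0);
}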
@@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 		return ret;
 
 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-
-	if (unlikely(genpd->set_performance_state)) {
-		ret = genpd->set_performance_state(genpd, genpd->performance_state);
-		if (ret) {
-			pr_warn("%s: Failed to set performance state %d (%d)\n",
-				genpd->name, genpd->performance_state, ret);
-		}
-	}
-
 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
 		return ret;
 
@@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np,
 				ret);
 			goto unlock;
 		}
+
+		/*
+		 * Save table for faster processing while setting performance
+		 * state.
+		 */
+		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
+		WARN_ON(!genpd->opp_table);
 	}
 
 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
 	if (ret) {
-		if (genpd->set_performance_state)
+		if (genpd->set_performance_state) {
+			dev_pm_opp_put_opp_table(genpd->opp_table);
 			dev_pm_opp_of_remove_table(&genpd->dev);
+		}
 
 		goto unlock;
 	}
@@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 					i, ret);
 				goto error;
 			}
+
+			/*
+			 * Save table for faster processing while setting
+			 * performance state.
+			 */
+			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
+			WARN_ON(!genpd->opp_table);
 		}
 
 		genpd->provider = &np->fwnode;
@@ -1989,8 +2080,10 @@ error:
 		genpd->provider = NULL;
 		genpd->has_provider = false;
 
-		if (genpd->set_performance_state)
+		if (genpd->set_performance_state) {
+			dev_pm_opp_put_opp_table(genpd->opp_table);
 			dev_pm_opp_of_remove_table(&genpd->dev);
+		}
 	}
 
 	mutex_unlock(&gpd_list_lock);
@@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np)
 				if (!gpd->set_performance_state)
 					continue;
 
+				dev_pm_opp_put_opp_table(gpd->opp_table);
 				dev_pm_opp_of_remove_table(&gpd->dev);
 			}
 		}
@@ -2338,7 +2432,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 					 unsigned int index)
 {
-	struct device *genpd_dev;
+	struct device *virt_dev;
 	int num_domains;
 	int ret;
 
@@ -2352,31 +2446,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 		return NULL;
 
 	/* Allocate and register device on the genpd bus. */
-	genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL);
-	if (!genpd_dev)
+	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
+	if (!virt_dev)
 		return ERR_PTR(-ENOMEM);
 
-	dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev));
-	genpd_dev->bus = &genpd_bus_type;
-	genpd_dev->release = genpd_release_dev;
+	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
+	virt_dev->bus = &genpd_bus_type;
+	virt_dev->release = genpd_release_dev;
 
-	ret = device_register(genpd_dev);
+	ret = device_register(virt_dev);
 	if (ret) {
-		kfree(genpd_dev);
+		kfree(virt_dev);
 		return ERR_PTR(ret);
 	}
 
 	/* Try to attach the device to the PM domain at the specified index. */
-	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
+	ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
 	if (ret < 1) {
-		device_unregister(genpd_dev);
+		device_unregister(virt_dev);
 		return ret ? ERR_PTR(ret) : NULL;
 	}
 
-	pm_runtime_enable(genpd_dev);
-	genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
+	pm_runtime_enable(virt_dev);
+	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
 
-	return genpd_dev;
+	return virt_dev;
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
 
@@ -2521,52 +2615,36 @@ int of_genpd_parse_idle_states(struct device_node *dn,
 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
 
 /**
- * of_genpd_opp_to_performance_state- Gets performance state of device's
- * power domain corresponding to a DT node's "required-opps" property.
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
  *
- * @dev: Device for which the performance-state needs to be found.
- * @np: DT node where the "required-opps" property is present. This can be
- *	the device node itself (if it doesn't have an OPP table) or a node
- *	within the OPP table of a device (if device has an OPP table).
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
+ *	state.
  *
- * Returns performance state corresponding to the "required-opps" property of
- * a DT node. This calls platform specific genpd->opp_to_performance_state()
- * callback to translate power domain OPP to performance state.
+ * Returns performance state encoded in the OPP of the genpd. This calls
+ * platform specific genpd->opp_to_performance_state() callback to translate
+ * power domain OPP to performance state.
 *
 * Returns performance state on success and 0 on failure.
 */
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-					       struct device_node *np)
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+					       struct dev_pm_opp *opp)
 {
-	struct generic_pm_domain *genpd;
-	struct dev_pm_opp *opp;
-	int state = 0;
+	struct generic_pm_domain *genpd = NULL;
+	int state;
 
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return 0;
+	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
 
-	if (unlikely(!genpd->set_performance_state))
+	if (unlikely(!genpd->opp_to_performance_state))
 		return 0;
 
 	genpd_lock(genpd);
-
-	opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
-	if (IS_ERR(opp)) {
-		dev_err(dev, "Failed to find required OPP: %ld\n",
-			PTR_ERR(opp));
-		goto unlock;
-	}
-
 	state = genpd->opp_to_performance_state(genpd, opp);
-	dev_pm_opp_put(opp);
-
-unlock:
 	genpd_unlock(genpd);
 
 	return state;
 }
-EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
+EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
 
 static int __init genpd_bus_init(void)
 {
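With the provider-side changes above, a genpd that implements ->set_performance_state also gets its OPP table cached in genpd->opp_table at provider registration, so a vote can later be translated between domains with dev_pm_opp_xlate_performance_state(). A provider sketch, assuming a hypothetical "foo" power controller: the foo_* helpers are stand-ins for hardware access, while the genpd fields, pm_genpd_init() and of_genpd_add_provider_simple() are the real interfaces exercised by this diff:

#include <linux/of.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>

/* Hypothetical hardware hook, stubbed out for illustration. */
static int foo_hw_vote(unsigned int level)
{
	return 0;	/* program a PMIC/corner register here */
}

static int foo_pd_set_performance_state(struct generic_pm_domain *pd,
					unsigned int state)
{
	return foo_hw_vote(state);
}

static unsigned int foo_pd_opp_to_performance_state(struct generic_pm_domain *pd,
						    struct dev_pm_opp *opp)
{
	/* Decode whatever the OPP node encodes; hardware specific. */
	return 1;
}

static struct generic_pm_domain foo_pd = {
	.name = "foo-pd",
	.set_performance_state = foo_pd_set_performance_state,
	.opp_to_performance_state = foo_pd_opp_to_performance_state,
};

static int foo_pd_probe(struct device_node *np)
{
	int ret = pm_genpd_init(&foo_pd, NULL, true);

	if (ret)
		return ret;

	/*
	 * Because ->set_performance_state is set, registration now also
	 * caches genpd->opp_table (via dev_pm_opp_get_opp_table()) for
	 * later cross-domain translation of performance-state votes.
	 */
	return of_genpd_add_provider_simple(np, &foo_pd);
}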
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index beb85c31f3fa..70624695b6d5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -8,6 +8,8 @@
  */
 
 #include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_wakeirq.h>
@@ -93,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
 static void pm_runtime_deactivate_timer(struct device *dev)
 {
 	if (dev->power.timer_expires > 0) {
-		del_timer(&dev->power.suspend_timer);
+		hrtimer_cancel(&dev->power.suspend_timer);
 		dev->power.timer_expires = 0;
 	}
 }
@@ -124,12 +126,11 @@ static void pm_runtime_cancel_pending(struct device *dev)
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
-	long elapsed;
-	unsigned long last_busy;
-	unsigned long expires = 0;
+	u64 last_busy, expires = 0;
+	u64 now = ktime_to_ns(ktime_get());
 
 	if (!dev->power.use_autosuspend)
 		goto out;
@@ -139,19 +140,9 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
 		goto out;
 
 	last_busy = READ_ONCE(dev->power.last_busy);
-	elapsed = jiffies - last_busy;
-	if (elapsed < 0)
-		goto out;	/* jiffies has wrapped around. */
 
-	/*
-	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
-	 * up to the nearest second.
-	 */
-	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
-	if (autosuspend_delay >= 1000)
-		expires = round_jiffies(expires);
-	expires += !expires;
-	if (elapsed >= expires - last_busy)
+	expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
+	if (expires <= now)
 		expires = 0;	/* Already expired. */
 
 out:
@@ -515,7 +506,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 	if ((rpmflags & RPM_AUTO)
 	    && dev->power.runtime_status != RPM_SUSPENDING) {
-		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+		u64 expires = pm_runtime_autosuspend_expiration(dev);
 
 		if (expires != 0) {
 			/* Pending requests need to be canceled. */
@@ -528,10 +519,20 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 			 * expire; pm_suspend_timer_fn() will take care of the
 			 * rest.
 			 */
-			if (!(dev->power.timer_expires && time_before_eq(
-			    dev->power.timer_expires, expires))) {
+			if (!(dev->power.timer_expires &&
+					dev->power.timer_expires <= expires)) {
+				/*
+				 * We add a slack of 25% to gather wakeups
+				 * without sacrificing the granularity.
+				 */
+				u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+						(NSEC_PER_MSEC >> 2);
+
 				dev->power.timer_expires = expires;
-				mod_timer(&dev->power.suspend_timer, expires);
+				hrtimer_start_range_ns(&dev->power.suspend_timer,
+						ns_to_ktime(expires),
+						slack,
+						HRTIMER_MODE_ABS);
 			}
 			dev->power.timer_autosuspends = 1;
 			goto out;
@@ -895,23 +896,25 @@ static void pm_runtime_work(struct work_struct *work)
 *
 * Check if the time is right and queue a suspend request.
 */
-static void pm_suspend_timer_fn(struct timer_list *t)
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 {
-	struct device *dev = from_timer(dev, t, power.suspend_timer);
+	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 	unsigned long flags;
-	unsigned long expires;
+	u64 expires;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
 	expires = dev->power.timer_expires;
 	/* If 'expire' is after 'jiffies' we've been called too early. */
-	if (expires > 0 && !time_after(expires, jiffies)) {
+	if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return HRTIMER_NORESTART;
 }
 
 /**
@@ -922,6 +925,7 @@ static void pm_suspend_timer_fn(struct timer_list *t)
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	unsigned long flags;
+	ktime_t expires;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
@@ -938,10 +942,10 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
-	dev->power.timer_expires += !dev->power.timer_expires;
+	expires = ktime_add(ktime_get(), ms_to_ktime(delay));
+	dev->power.timer_expires = ktime_to_ns(expires);
 	dev->power.timer_autosuspends = 0;
-	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 
  out:
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1491,7 +1495,8 @@ void pm_runtime_init(struct device *dev)
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
-	timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	dev->power.suspend_timer.function = pm_suspend_timer_fn;
 
 	init_waitqueue_head(&dev->power.wait_queue);
 }
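With jiffies replaced by nanoseconds, the deadline and slack arithmetic in the runtime-PM core reduces to plain integer math on a monotonic clock. A self-contained model of the two computations, not the kernel code itself (the foo_* names are illustrative): for a 50 ms autosuspend delay the slack works out to 50 * (NSEC_PER_MSEC >> 2) = 12500000 ns, i.e. 25% of the delay.

#include <linux/ktime.h>

/* Mirror of pm_runtime_autosuspend_expiration(): 0 means already expired. */
static u64 foo_autosuspend_expiration(u64 last_busy_ns, int delay_ms)
{
	u64 now = ktime_to_ns(ktime_get());
	u64 expires = last_busy_ns + (u64)delay_ms * NSEC_PER_MSEC;

	return expires > now ? expires : 0;
}

/* Mirror of the slack computed in rpm_suspend(): 25% of the delay. */
static u64 foo_autosuspend_slack(int delay_ms)
{
	/* NSEC_PER_MSEC >> 2 is a quarter of a millisecond in ns. */
	return (u64)delay_ms * (NSEC_PER_MSEC >> 2);
}

The slack lets the hrtimer framework coalesce the suspend timer with other nearby wakeups instead of firing at an exact nanosecond, trading a bounded delay (at most 25% of the autosuspend delay) for fewer CPU wakeups.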