Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile  |   3
-rw-r--r--  drivers/base/power/domain.c  |   2
-rw-r--r--  drivers/base/power/main.c    | 275
-rw-r--r--  drivers/base/power/power.h   |   4
-rw-r--r--  drivers/base/power/qos.c     | 220
-rw-r--r--  drivers/base/power/runtime.c | 164
-rw-r--r--  drivers/base/power/sysfs.c   |  97
7 files changed, 594 insertions, 171 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2e58ebb1f6c0..1cb8544598d5 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,6 +1,5 @@ -obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o -obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index bfb8955c406c..dc127e5dec4b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -42,7 +42,7 @@ struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ if (!__retval && __elapsed > __td->field) { \ __td->field = __elapsed; \ - dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ __elapsed); \ genpd->max_off_time_changed = true; \ __td->constraint_changed = true; \ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c9fbb9d5484d..86d5e4fb5b98 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -92,6 +92,8 @@ void device_pm_sleep_init(struct device *dev) { dev->power.is_prepared = false; dev->power.is_suspended = false; + dev->power.is_noirq_suspended = false; + dev->power.is_late_suspended = false; init_completion(&dev->power.completion); complete_all(&dev->power.completion); dev->power.wakeup = NULL; @@ -468,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_resume_noirq(struct device *dev, pm_message_t state) +static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; @@ -480,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) if (dev->power.syscore) goto Out; + if (!dev->power.is_noirq_suspended) + goto Out; + + dpm_wait(dev->parent, async); + if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -500,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) } error = dpm_run_callback(callback, dev, state, info); + dev->power.is_noirq_suspended = false; Out: + complete_all(&dev->power.completion); TRACE_RESUME(error); return error; } +static bool is_async(struct device *dev) +{ + return dev->power.async_suspend && pm_async_enabled + && !pm_trace_is_enabled(); +} + +static void async_resume_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_noirq(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + /** * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. * @state: PM transition of the system being carried out. 
@@ -515,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) */ static void dpm_resume_noirq(pm_message_t state) { + struct device *dev; ktime_t starttime = ktime_get(); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_noirq_list)) { - struct device *dev = to_device(dpm_noirq_list.next); - int error; + pm_transition = state; + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. + */ + list_for_each_entry(dev, &dpm_noirq_list, power.entry) { + reinit_completion(&dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume_noirq, dev); + } + } + + while (!list_empty(&dpm_noirq_list)) { + dev = to_device(dpm_noirq_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_late_early_list); mutex_unlock(&dpm_list_mtx); - error = device_resume_noirq(dev, state); - if (error) { - suspend_stats.failed_resume_noirq++; - dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " noirq", error); + if (!is_async(dev)) { + int error; + + error = device_resume_noirq(dev, state, false); + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " noirq", error); + } } mutex_lock(&dpm_list_mtx); put_device(dev); } mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); dpm_show_time(starttime, state, "noirq"); resume_device_irqs(); cpuidle_resume(); @@ -550,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_resume_early(struct device *dev, pm_message_t state) +static int device_resume_early(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; @@ -562,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state) if (dev->power.syscore) goto Out; + if (!dev->power.is_late_suspended) + goto Out; + + dpm_wait(dev->parent, async); + if (dev->pm_domain) { info = "early power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -582,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state) } error = dpm_run_callback(callback, dev, state, info); + dev->power.is_late_suspended = false; Out: TRACE_RESUME(error); pm_runtime_enable(dev); + complete_all(&dev->power.completion); return error; } +static void async_resume_early(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_early(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + /** * dpm_resume_early - Execute "early resume" callbacks for all devices. * @state: PM transition of the system being carried out. */ static void dpm_resume_early(pm_message_t state) { + struct device *dev; ktime_t starttime = ktime_get(); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_late_early_list)) { - struct device *dev = to_device(dpm_late_early_list.next); - int error; + pm_transition = state; + + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. 
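Note on the async noirq/early resume paths above: a device only takes the async path when is_async() is true, i.e. dev->power.async_suspend is set, pm_async is enabled, and PM trace is off. A minimal sketch of the opt-in, assuming a hypothetical example driver; device_enable_async_suspend() is the stock helper that sets the flag, typically called at probe time:

#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical probe routine, for illustration only. */
static int example_probe(struct platform_device *pdev)
{
	/*
	 * Mark the device as safe to suspend/resume asynchronously so the
	 * async noirq/early phases added above may run its callbacks in
	 * parallel with other devices.
	 */
	device_enable_async_suspend(&pdev->dev);
	return 0;
}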
+ */ + list_for_each_entry(dev, &dpm_late_early_list, power.entry) { + reinit_completion(&dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume_early, dev); + } + } + while (!list_empty(&dpm_late_early_list)) { + dev = to_device(dpm_late_early_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_suspended_list); mutex_unlock(&dpm_list_mtx); - error = device_resume_early(dev, state); - if (error) { - suspend_stats.failed_resume_early++; - dpm_save_failed_step(SUSPEND_RESUME_EARLY); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " early", error); - } + if (!is_async(dev)) { + int error; + error = device_resume_early(dev, state, false); + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " early", error); + } + } mutex_lock(&dpm_list_mtx); put_device(dev); } mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); dpm_show_time(starttime, state, "early"); } @@ -733,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie) put_device(dev); } -static bool is_async(struct device *dev) -{ - return dev->power.async_suspend && pm_async_enabled - && !pm_trace_is_enabled(); -} - /** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. @@ -916,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_suspend_noirq(struct device *dev, pm_message_t state) +static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; + int error = 0; + + if (async_error) + goto Complete; + + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } if (dev->power.syscore) - return 0; + goto Complete; + + dpm_wait_for_children(dev, async); if (dev->pm_domain) { info = "noirq power domain "; @@ -943,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) callback = pm_noirq_op(dev->driver->pm, state); } - return dpm_run_callback(callback, dev, state, info); + error = dpm_run_callback(callback, dev, state, info); + if (!error) + dev->power.is_noirq_suspended = true; + else + async_error = error; + +Complete: + complete_all(&dev->power.completion); + return error; +} + +static void async_suspend_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_noirq(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + + put_device(dev); +} + +static int device_suspend_noirq(struct device *dev) +{ + reinit_completion(&dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend_noirq, dev); + return 0; + } + return __device_suspend_noirq(dev, pm_transition, false); } /** @@ -961,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state) cpuidle_pause(); suspend_device_irqs(); mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + while (!list_empty(&dpm_late_early_list)) { struct device *dev = to_device(dpm_late_early_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); - error = device_suspend_noirq(dev, state); + error = device_suspend_noirq(dev); mutex_lock(&dpm_list_mtx); 
if (error) { pm_dev_err(dev, state, " noirq", error); - suspend_stats.failed_suspend_noirq++; - dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; @@ -982,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state) list_move(&dev->power.entry, &dpm_noirq_list); put_device(dev); - if (pm_wakeup_pending()) { - error = -EBUSY; + if (async_error) break; - } } mutex_unlock(&dpm_list_mtx); - if (error) + async_synchronize_full(); + if (!error) + error = async_error; + + if (error) { + suspend_stats.failed_suspend_noirq++; + dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); dpm_resume_noirq(resume_event(state)); - else + } else { dpm_show_time(starttime, state, "noirq"); + } return error; } @@ -1002,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_suspend_late(struct device *dev, pm_message_t state) +static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; + int error = 0; __pm_runtime_disable(dev, false); + if (async_error) + goto Complete; + + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + if (dev->power.syscore) - return 0; + goto Complete; + + dpm_wait_for_children(dev, async); if (dev->pm_domain) { info = "late power domain "; @@ -1031,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state) callback = pm_late_early_op(dev->driver->pm, state); } - return dpm_run_callback(callback, dev, state, info); + error = dpm_run_callback(callback, dev, state, info); + if (!error) + dev->power.is_late_suspended = true; + else + async_error = error; + +Complete: + complete_all(&dev->power.completion); + return error; +} + +static void async_suspend_late(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_late(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + put_device(dev); +} + +static int device_suspend_late(struct device *dev) +{ + reinit_completion(&dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend_late, dev); + return 0; + } + + return __device_suspend_late(dev, pm_transition, false); } /** @@ -1044,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state) int error = 0; mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + while (!list_empty(&dpm_suspended_list)) { struct device *dev = to_device(dpm_suspended_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); - error = device_suspend_late(dev, state); + error = device_suspend_late(dev); mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " late", error); - suspend_stats.failed_suspend_late++; - dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; @@ -1065,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state) list_move(&dev->power.entry, &dpm_late_early_list); put_device(dev); - if (pm_wakeup_pending()) { - error = -EBUSY; + if (async_error) break; - } } mutex_unlock(&dpm_list_mtx); - if (error) + async_synchronize_full(); + if (error) { + suspend_stats.failed_suspend_late++; + dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); - else + } else { dpm_show_time(starttime, state, "late"); - 
+ } return error; } diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index cfc3226ec492..a21223d95926 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev); extern void rpm_sysfs_remove(struct device *dev); extern int wakeup_sysfs_add(struct device *dev); extern void wakeup_sysfs_remove(struct device *dev); -extern int pm_qos_sysfs_add_latency(struct device *dev); -extern void pm_qos_sysfs_remove_latency(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5c1361a9e5dd..36b9eb4862cb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags); s32 __dev_pm_qos_read_value(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? - 0 : pm_qos_read_value(&dev->power.qos->latency); + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); } /** @@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req, int ret; switch(req->type) { - case DEV_PM_QOS_LATENCY: - ret = pm_qos_update_target(&qos->latency, &req->data.pnode, - action, value); + case DEV_PM_QOS_RESUME_LATENCY: + ret = pm_qos_update_target(&qos->resume_latency, + &req->data.pnode, action, value); if (ret) { - value = pm_qos_read_value(&qos->latency); + value = pm_qos_read_value(&qos->resume_latency); blocking_notifier_call_chain(&dev_pm_notifiers, (unsigned long)value, req); } break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + ret = pm_qos_update_target(&qos->latency_tolerance, + &req->data.pnode, action, value); + if (ret) { + value = pm_qos_read_value(&qos->latency_tolerance); + req->dev->power.set_latency_tolerance(req->dev, value); + } + break; case DEV_PM_QOS_FLAGS: ret = pm_qos_update_flags(&qos->flags, &req->data.flr, action, value); @@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) } BLOCKING_INIT_NOTIFIER_HEAD(n); - c = &qos->latency; + c = &qos->resume_latency; plist_head_init(&c->list); - c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; - c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = n; + c = &qos->latency_tolerance; + plist_head_init(&c->list); + c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + c->type = PM_QOS_MIN; + INIT_LIST_HEAD(&qos->flags.list); spin_lock_irq(&dev->power.lock); @@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. */ - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); pm_qos_sysfs_remove_flags(dev); mutex_lock(&dev_pm_qos_mtx); @@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) goto out; /* Flush the constraints lists for the device. 
*/ - c = &qos->latency; + c = &qos->resume_latency; plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { /* * Update constraints list and call the notification @@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); } + c = &qos->latency_tolerance; + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } f = &qos->flags; list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); @@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } +static bool dev_pm_qos_invalid_request(struct device *dev, + struct dev_pm_qos_request *req) +{ + return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE + && !dev->power.set_latency_tolerance); +} + +static int __dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + int ret = 0; + + if (!dev || dev_pm_qos_invalid_request(dev, req)) + return -EINVAL; + + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) + return -EINVAL; + + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); + + trace_dev_pm_qos_add_request(dev_name(dev), type, value); + if (!ret) { + req->dev = dev; + req->type = type; + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + } + return ret; +} + /** * dev_pm_qos_add_request - inserts new qos request into the list * @dev: target device for the constraint @@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { - int ret = 0; - - if (!dev || !req) /*guard against callers passing in null */ - return -EINVAL; - - if (WARN(dev_pm_qos_request_active(req), - "%s() called for already added request\n", __func__)) - return -EINVAL; + int ret; mutex_lock(&dev_pm_qos_mtx); - - if (IS_ERR(dev->power.qos)) - ret = -ENODEV; - else if (!dev->power.qos) - ret = dev_pm_qos_constraints_allocate(dev); - - trace_dev_pm_qos_add_request(dev_name(dev), type, value); - if (!ret) { - req->dev = dev; - req->type = type; - ret = apply_constraint(req, PM_QOS_ADD_REQ, value); - } - + ret = __dev_pm_qos_add_request(dev, req, type, value); mutex_unlock(&dev_pm_qos_mtx); - return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); @@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, return -ENODEV; switch(req->type) { - case DEV_PM_QOS_LATENCY: + case DEV_PM_QOS_RESUME_LATENCY: + case DEV_PM_QOS_LATENCY_TOLERANCE: curr_value = req->data.pnode.prio; break; case DEV_PM_QOS_FLAGS: @@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) ret = dev_pm_qos_constraints_allocate(dev); if (!ret) - ret = blocking_notifier_chain_register( - dev->power.qos->latency.notifiers, notifier); + ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return ret; @@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev, /* Silently return if the constraints object is not present. 
*/ if (!IS_ERR_OR_NULL(dev->power.qos)) - retval = blocking_notifier_chain_unregister( - dev->power.qos->latency.notifiers, - notifier); + retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return retval; @@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. * @req: Pointer to the preallocated handle. + * @type: Type of the request. * @value: Constraint latency value. */ int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) { struct device *ancestor = dev->parent; int ret = -ENODEV; - while (ancestor && !ancestor->power.ignore_children) - ancestor = ancestor->parent; + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + while (ancestor && !ancestor->power.set_latency_tolerance) + ancestor = ancestor->parent; + + break; + default: + ancestor = NULL; + } if (ancestor) - ret = dev_pm_qos_add_request(ancestor, req, - DEV_PM_QOS_LATENCY, value); + ret = dev_pm_qos_add_request(ancestor, req, type, value); if (ret < 0) req->dev = NULL; @@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, struct dev_pm_qos_request *req = NULL; switch(type) { - case DEV_PM_QOS_LATENCY: - req = dev->power.qos->latency_req; - dev->power.qos->latency_req = NULL; + case DEV_PM_QOS_RESUME_LATENCY: + req = dev->power.qos->resume_latency_req; + dev->power.qos->resume_latency_req = NULL; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + req = dev->power.qos->latency_tolerance_req; + dev->power.qos->latency_tolerance_req = NULL; break; case DEV_PM_QOS_FLAGS: req = dev->power.qos->flags_req; @@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (!req) return -ENOMEM; - ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); if (ret < 0) { kfree(req); return ret; @@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (IS_ERR_OR_NULL(dev->power.qos)) ret = -ENODEV; - else if (dev->power.qos->latency_req) + else if (dev->power.qos->resume_latency_req) ret = -EEXIST; if (ret < 0) { @@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->latency_req = req; + dev->power.qos->resume_latency_req = req; mutex_unlock(&dev_pm_qos_mtx); - ret = pm_qos_sysfs_add_latency(dev); + ret = pm_qos_sysfs_add_resume_latency(dev); if (ret) - dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); out: mutex_unlock(&dev_pm_qos_sysfs_mtx); @@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); static void __dev_pm_qos_hide_latency_limit(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); } /** @@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev) { 
mutex_lock(&dev_pm_qos_sysfs_mtx); - pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_resume_latency(dev); mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_latency_limit(dev); @@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) pm_runtime_put(dev); return ret; } + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ + s32 ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req ? + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : + dev->power.qos->latency_tolerance_req->data.pnode.prio; + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). + */ +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req) { + struct dev_pm_qos_request *req; + + if (val < 0) { + ret = -EINVAL; + goto out; + } + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto out; + } + ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); + if (ret < 0) { + kfree(req); + goto out; + } + dev->power.qos->latency_tolerance_req = req; + } else { + if (val < 0) { + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); + ret = 0; + } else { + ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); + } + } + + out: + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} #else /* !CONFIG_PM_RUNTIME */ static void __dev_pm_qos_hide_latency_limit(struct device *dev) {} static void __dev_pm_qos_hide_flags(struct device *dev) {} diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 72e00e66ecc5..67c7938e430b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -13,6 +13,43 @@ #include <trace/events/rpm.h> #include "power.h" +#define RPM_GET_CALLBACK(dev, cb) \ +({ \ + int (*__rpm_cb)(struct device *__d); \ + \ + if (dev->pm_domain) \ + __rpm_cb = dev->pm_domain->ops.cb; \ + else if (dev->type && dev->type->pm) \ + __rpm_cb = dev->type->pm->cb; \ + else if (dev->class && dev->class->pm) \ + __rpm_cb = dev->class->pm->cb; \ + else if (dev->bus && dev->bus->pm) \ + __rpm_cb = dev->bus->pm->cb; \ + else \ + __rpm_cb = NULL; \ + \ + if (!__rpm_cb && dev->driver && dev->driver->pm) \ + __rpm_cb = dev->driver->pm->cb; \ + \ + __rpm_cb; \ +}) + +static int (*rpm_get_suspend_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_suspend); +} + +static int (*rpm_get_resume_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_resume); +} + +#ifdef CONFIG_PM_RUNTIME +static int (*rpm_get_idle_cb(struct device *dev))(struct device *) +{ + return RPM_GET_CALLBACK(dev, runtime_idle); +} + static int rpm_resume(struct device *dev, int rpmflags); static int rpm_suspend(struct device *dev, int rpmflags); @@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_idle; - else if (dev->type && dev->type->pm) - callback = 
dev->type->pm->runtime_idle; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_idle; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_idle; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_idle; + callback = rpm_get_idle_cb(dev); if (callback) retval = __rpm_callback(callback, dev); @@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_SUSPENDING); - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_suspend; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_suspend; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_suspend; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_suspend; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_suspend; + callback = rpm_get_suspend_cb(dev); retval = rpm_callback(callback, dev); if (retval) @@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_RESUMING); - if (dev->pm_domain) - callback = dev->pm_domain->ops.runtime_resume; - else if (dev->type && dev->type->pm) - callback = dev->type->pm->runtime_resume; - else if (dev->class && dev->class->pm) - callback = dev->class->pm->runtime_resume; - else if (dev->bus && dev->bus->pm) - callback = dev->bus->pm->runtime_resume; - else - callback = NULL; - - if (!callback && dev->driver && dev->driver->pm) - callback = dev->driver->pm->runtime_resume; + callback = rpm_get_resume_cb(dev); retval = rpm_callback(callback, dev); if (retval) { @@ -1130,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier); * @dev: Device to handle. * @check_resume: If set, check if there's a resume request for the device. * - * Increment power.disable_depth for the device and if was zero previously, + * Increment power.disable_depth for the device and if it was zero previously, * cancel all pending runtime PM requests for the device and wait for all * operations in progress to complete. The device can be either active or * suspended after its runtime PM has been disabled. @@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev) if (dev->power.irq_safe && dev->parent) pm_runtime_put(dev->parent); } +#endif + +/** + * pm_runtime_force_suspend - Force a device into suspend state if needed. + * @dev: Device to suspend. + * + * Disable runtime PM so we safely can check the device's runtime PM status and + * if it is active, invoke it's .runtime_suspend callback to bring it into + * suspend state. Keep runtime PM disabled to preserve the state unless we + * encounter errors. + * + * Typically this function may be invoked from a system suspend callback to make + * sure the device is put into low power state. + */ +int pm_runtime_force_suspend(struct device *dev) +{ + int (*callback)(struct device *); + int ret = 0; + + pm_runtime_disable(dev); + + /* + * Note that pm_runtime_status_suspended() returns false while + * !CONFIG_PM_RUNTIME, which means the device will be put into low + * power state. 
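pm_runtime_force_suspend() and pm_runtime_force_resume(), added below, are meant to be called from a driver's system sleep callbacks, so a driver that already implements runtime PM can reuse those callbacks for system suspend. A minimal sketch of that pattern, assuming hypothetical example_* callbacks and an unspecified piece of hardware:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* Put the hardware into its low-power state (illustrative only). */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* Bring the hardware back to full power (illustrative only). */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	/* System sleep reuses the runtime PM callbacks via the new helpers. */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};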
+ */ + if (pm_runtime_status_suspended(dev)) + return 0; + + callback = rpm_get_suspend_cb(dev); + + if (!callback) { + ret = -ENOSYS; + goto err; + } + + ret = callback(dev); + if (ret) + goto err; + + pm_runtime_set_suspended(dev); + return 0; +err: + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); + +/** + * pm_runtime_force_resume - Force a device into resume state. + * @dev: Device to resume. + * + * Prior invoking this function we expect the user to have brought the device + * into low power state by a call to pm_runtime_force_suspend(). Here we reverse + * those actions and brings the device into full power. We update the runtime PM + * status and re-enables runtime PM. + * + * Typically this function may be invoked from a system resume callback to make + * sure the device is put into full power state. + */ +int pm_runtime_force_resume(struct device *dev) +{ + int (*callback)(struct device *); + int ret = 0; + + callback = rpm_get_resume_cb(dev); + + if (!callback) { + ret = -ENOSYS; + goto out; + } + + ret = callback(dev); + if (ret) + goto out; + + pm_runtime_set_active(dev); + pm_runtime_mark_last_busy(dev); +out: + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_resume); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 03e089ade5ce..95b181d1ca6d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, autosuspend_delay_ms_store); -static ssize_t pm_qos_latency_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t pm_qos_resume_latency_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); + return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev)); } -static ssize_t pm_qos_latency_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t n) +static ssize_t pm_qos_resume_latency_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) { s32 value; int ret; @@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev, if (value < 0) return -EINVAL; - ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); + ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, + value); return ret < 0 ? ret : n; } static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, - pm_qos_latency_show, pm_qos_latency_store); + pm_qos_resume_latency_show, pm_qos_resume_latency_store); + +static ssize_t pm_qos_latency_tolerance_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + + if (value < 0) + return sprintf(buf, "auto\n"); + else if (value == PM_QOS_LATENCY_ANY) + return sprintf(buf, "any\n"); + + return sprintf(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (kstrtos32(buf, 0, &value)) { + if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) + value = PM_QOS_LATENCY_ANY; + } + ret = dev_pm_qos_update_user_latency_tolerance(dev, value); + return ret < 0 ? 
ret : n; +} + +static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, + pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, @@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = { .attrs = runtime_attrs, }; -static struct attribute *pm_qos_latency_attrs[] = { +static struct attribute *pm_qos_resume_latency_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_pm_qos_resume_latency_us.attr, #endif /* CONFIG_PM_RUNTIME */ NULL, }; -static struct attribute_group pm_qos_latency_attr_group = { +static struct attribute_group pm_qos_resume_latency_attr_group = { + .name = power_group_name, + .attrs = pm_qos_resume_latency_attrs, +}; + +static struct attribute *pm_qos_latency_tolerance_attrs[] = { +#ifdef CONFIG_PM_RUNTIME + &dev_attr_pm_qos_latency_tolerance_us.attr, +#endif /* CONFIG_PM_RUNTIME */ + NULL, +}; +static struct attribute_group pm_qos_latency_tolerance_attr_group = { .name = power_group_name, - .attrs = pm_qos_latency_attrs, + .attrs = pm_qos_latency_tolerance_attrs, }; static struct attribute *pm_qos_flags_attrs[] = { @@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev) if (rc) goto err_out; } - if (device_can_wakeup(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); - if (rc) { - if (pm_runtime_callbacks_present(dev)) - sysfs_unmerge_group(&dev->kobj, - &pm_runtime_attr_group); - goto err_out; - } + if (rc) + goto err_runtime; + } + if (dev->power.set_latency_tolerance) { + rc = sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); + if (rc) + goto err_wakeup; } return 0; + err_wakeup: + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); err_out: sysfs_remove_group(&dev->kobj, &pm_attr_group); return rc; @@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); } -int pm_qos_sysfs_add_latency(struct device *dev) +int pm_qos_sysfs_add_resume_latency(struct device *dev) { - return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); + return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } -void pm_qos_sysfs_remove_latency(struct device *dev) +void pm_qos_sysfs_remove_resume_latency(struct device *dev) { - sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); + sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } int pm_qos_sysfs_add_flags(struct device *dev) @@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); |
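The new pm_qos_latency_tolerance_us attribute is only created when dev->power.set_latency_tolerance is non-NULL at dpm_sysfs_add() time, and every change to the aggregate DEV_PM_QOS_LATENCY_TOLERANCE value (written from user space via the attribute, or added by in-kernel requests) is delivered through that hook. A minimal sketch of a provider, assuming a hypothetical bus or driver that assigns the hook before the device is registered:

#include <linux/device.h>
#include <linux/pm_qos.h>

static void example_set_latency_tolerance(struct device *dev, s32 val)
{
	/*
	 * Program the hardware's latency-tolerance (e.g. LTR) setting;
	 * illustrative only.  PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (a
	 * negative value) means no constraint is currently in effect.
	 */
	dev_dbg(dev, "latency tolerance request: %d\n", val);
}

/* Hypothetical setup hook, called before device_add(). */
static void example_setup_latency_tolerance(struct device *dev)
{
	dev->power.set_latency_tolerance = example_set_latency_tolerance;
}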