Diffstat (limited to 'drivers/base/power')
-rw-r--r-- | drivers/base/power/clock_ops.c   |  73
-rw-r--r-- | drivers/base/power/generic_ops.c |  24
-rw-r--r-- | drivers/base/power/main.c        | 186
-rw-r--r-- | drivers/base/power/runtime.c     | 137
-rw-r--r-- | drivers/base/power/sysfs.c       |   1
-rw-r--r-- | drivers/base/power/wakeirq.c     |  26
-rw-r--r-- | drivers/base/power/wakeup.c      |   6
7 files changed, 273 insertions, 180 deletions
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e18ba676cdf6..b69bcb37c830 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -259,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(pm_clk_add_clk);
 
-
-/**
- * of_pm_clk_add_clk - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @name: Name of clock that is going to be used for power management.
- *
- * Add the clock described in the 'clocks' device-tree node that matches
- * with the 'name' provided, to the list of clocks used for the power
- * management of @dev. On success, returns 0. Returns a negative error
- * code if the clock is not found or cannot be added.
- */
-int of_pm_clk_add_clk(struct device *dev, const char *name)
-{
-	struct clk *clk;
-	int ret;
-
-	if (!dev || !dev->of_node || !name)
-		return -EINVAL;
-
-	clk = of_clk_get_by_name(dev->of_node, name);
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
-
-	ret = pm_clk_add_clk(dev, clk);
-	if (ret) {
-		clk_put(clk);
-		return ret;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
-
 /**
  * of_pm_clk_add_clks - Start using device clock(s) for power management.
  * @dev: Device whose clock(s) is going to be used for power management.
@@ -377,46 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
 }
 
 /**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
-	struct pm_subsys_data *psd = dev_to_psd(dev);
-	struct pm_clock_entry *ce;
-
-	if (!psd)
-		return;
-
-	pm_clk_list_lock(psd);
-
-	list_for_each_entry(ce, &psd->clock_list, node) {
-		if (!con_id && !ce->con_id)
-			goto remove;
-		else if (!con_id || !ce->con_id)
-			continue;
-		else if (!strcmp(con_id, ce->con_id))
-			goto remove;
-	}
-
-	pm_clk_list_unlock(psd);
-	return;
-
- remove:
-	list_del(&ce->node);
-	if (ce->enabled_when_prepared)
-		psd->clock_op_might_sleep--;
-	pm_clk_list_unlock(psd);
-
-	__pm_clk_remove(ce);
-}
-EXPORT_SYMBOL_GPL(pm_clk_remove);
-
-/**
  * pm_clk_remove_clk - Stop using a device clock for power management.
  * @dev: Device whose clock should not be used for PM any more.
  * @clk: Clock pointer
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4fa525668cb7..6502720bb564 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -115,18 +115,6 @@ int pm_generic_freeze_noirq(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
 
 /**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
  * pm_generic_freeze - Generic freeze callback for subsystems.
  * @dev: Device to freeze.
  */
@@ -187,18 +175,6 @@ int pm_generic_thaw_noirq(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
 
 /**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
  * pm_generic_thaw - Generic thaw callback for subsystems.
  * @dev: Device to thaw.
  */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e1..1926454c7a7e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -249,7 +249,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
 
 static void dpm_wait_for_children(struct device *dev, bool async)
 {
-	device_for_each_child(dev, &async, dpm_wait_fn);
+	device_for_each_child(dev, &async, dpm_wait_fn);
 }
 
 static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -496,6 +496,7 @@ struct dpm_watchdog {
 	struct device *dev;
 	struct task_struct *tsk;
 	struct timer_list timer;
+	bool fatal;
 };
 
 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@@ -512,11 +513,23 @@ struct dpm_watchdog {
 static void dpm_watchdog_handler(struct timer_list *t)
 {
 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
+	struct timer_list *timer = &wd->timer;
+	unsigned int time_left;
+
+	if (wd->fatal) {
+		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+		show_stack(wd->tsk, NULL, KERN_EMERG);
+		panic("%s %s: unrecoverable failure\n",
+			dev_driver_string(wd->dev), dev_name(wd->dev));
+	}
+
+	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+	show_stack(wd->tsk, NULL, KERN_WARNING);
 
-	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
-	show_stack(wd->tsk, NULL, KERN_EMERG);
-	panic("%s %s: unrecoverable failure\n",
-		dev_driver_string(wd->dev), dev_name(wd->dev));
+	wd->fatal = true;
+	mod_timer(timer, jiffies + HZ * time_left);
 }
 
 /**
@@ -530,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 
 	wd->dev = dev;
 	wd->tsk = current;
+	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
 
 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 	/* use same timeout value for both suspend and resume */
-	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
 	add_timer(timer);
 }
 
@@ -545,7 +559,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 {
 	struct timer_list *timer = &wd->timer;
 
-	del_timer_sync(timer);
+	timer_delete_sync(timer);
 	destroy_timer_on_stack(timer);
 }
 #else
@@ -585,27 +599,34 @@ static bool is_async(struct device *dev)
 
 static bool dpm_async_fn(struct device *dev, async_func_t func)
 {
-	reinit_completion(&dev->power.completion);
+	if (!is_async(dev))
+		return false;
 
-	if (is_async(dev)) {
-		dev->power.async_in_progress = true;
+	dev->power.work_in_progress = true;
 
-		get_device(dev);
+	get_device(dev);
+
+	if (async_schedule_dev_nocall(func, dev))
+		return true;
 
-		if (async_schedule_dev_nocall(func, dev))
-			return true;
+	put_device(dev);
 
-		put_device(dev);
-	}
 	/*
-	 * Because async_schedule_dev_nocall() above has returned false or it
-	 * has not been called at all, func() is not running and it is safe to
-	 * update the async_in_progress flag without extra synchronization.
+	 * async_schedule_dev_nocall() above has returned false, so func() is
+	 * not running and it is safe to update power.work_in_progress without
+	 * extra synchronization.
 	 */
-	dev->power.async_in_progress = false;
+	dev->power.work_in_progress = false;
+
 	return false;
 }
 
+static void dpm_clear_async_state(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+	dev->power.work_in_progress = false;
+}
+
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
@@ -642,12 +663,12 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
 	 * so change its status accordingly.
 	 *
 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
-	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
-	 * to avoid confusing drivers that don't use it.
+	 * status to "active" unless its power.smart_suspend flag is clear, in
+	 * which case it is not necessary to update its PM-runtime status.
 	 */
 	if (skip_resume)
 		pm_runtime_set_suspended(dev);
-	else if (dev_pm_skip_suspend(dev))
+	else if (dev_pm_smart_suspend(dev))
 		pm_runtime_set_active(dev);
 
 	if (dev->pm_domain) {
@@ -715,14 +736,16 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	 * Trigger the resume of "async" devices upfront so they don't have to
 	 * wait for the "non-async" ones they don't depend on.
 	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+		dpm_clear_async_state(dev);
 		dpm_async_fn(dev, async_resume_noirq);
+	}
 
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
-		if (!dev->power.async_in_progress) {
+		if (!dev->power.work_in_progress) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -855,14 +878,16 @@ void dpm_resume_early(pm_message_t state)
 	 * Trigger the resume of "async" devices upfront so they don't have to
 	 * wait for the "non-async" ones they don't depend on.
 	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+		dpm_clear_async_state(dev);
 		dpm_async_fn(dev, async_resume_early);
+	}
 
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
-		if (!dev->power.async_in_progress) {
+		if (!dev->power.work_in_progress) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -913,8 +938,20 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;
 
+	if (!dev->power.is_suspended)
+		goto Complete;
+
+	dev->power.is_suspended = false;
+
 	if (dev->power.direct_complete) {
-		/* Match the pm_runtime_disable() in __device_suspend(). */
+		/*
+		 * Allow new children to be added under the device after this
+		 * point if it has no PM callbacks.
+		 */
+		if (dev->power.no_pm_callbacks)
+			dev->power.is_prepared = false;
+
+		/* Match the pm_runtime_disable() in device_suspend(). */
 		pm_runtime_enable(dev);
 		goto Complete;
 	}
@@ -931,9 +968,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 	 */
 	dev->power.is_prepared = false;
 
-	if (!dev->power.is_suspended)
-		goto Unlock;
-
 	if (dev->pm_domain) {
 		info = "power domain ";
 		callback = pm_op(&dev->pm_domain->ops, state);
@@ -971,9 +1005,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 
  End:
 	error = dpm_run_callback(callback, dev, state, info);
-	dev->power.is_suspended = false;
 
- Unlock:
 	device_unlock(dev);
 	dpm_watchdog_clear(&wd);
 
@@ -1021,14 +1053,16 @@ void dpm_resume(pm_message_t state)
 	 * Trigger the resume of "async" devices upfront so they don't have to
 	 * wait for the "non-async" ones they don't depend on.
 	 */
-	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+		dpm_clear_async_state(dev);
 		dpm_async_fn(dev, async_resume);
+	}
 
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
 
-		if (!dev->power.async_in_progress) {
+		if (!dev->power.work_in_progress) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -1093,6 +1127,8 @@ static void device_complete(struct device *dev, pm_message_t state)
 	device_unlock(dev);
 
 out:
+	/* If enabling runtime PM for the device is blocked, unblock it. */
+	pm_runtime_unblock(dev);
 	pm_runtime_put(dev);
 }
 
@@ -1254,14 +1290,13 @@ Skip:
 	dev->power.is_noirq_suspended = true;
 
 	/*
-	 * Skipping the resume of devices that were in use right before the
-	 * system suspend (as indicated by their PM-runtime usage counters)
-	 * would be suboptimal. Also resume them if doing that is not allowed
-	 * to be skipped.
+	 * Devices must be resumed unless they are explicitly allowed to be left
+	 * in suspend, but even in that case skipping the resume of devices that
+	 * were in use right before the system suspend (as indicated by their
+	 * runtime PM usage counters and child counters) would be suboptimal.
 	 */
-	if (atomic_read(&dev->power.usage_count) > 1 ||
-	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
-	      dev->power.may_skip_resume))
+	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
 		dev->power.must_resume = true;
 
 	if (dev->power.must_resume)
@@ -1298,6 +1333,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 
 		list_move(&dev->power.entry, &dpm_noirq_list);
 
+		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_noirq))
 			continue;
 
@@ -1382,6 +1418,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
 	TRACE_DEVICE(dev);
 	TRACE_SUSPEND(0);
 
+	/*
+	 * Disable runtime PM for the device without checking if there is a
+	 * pending resume request for it.
+	 */
 	__pm_runtime_disable(dev, false);
 
 	dpm_wait_for_subordinate(dev, async);
@@ -1471,6 +1511,7 @@ int dpm_suspend_late(pm_message_t state)
 
 		list_move(&dev->power.entry, &dpm_late_early_list);
 
+		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_late))
 			continue;
 
@@ -1628,6 +1669,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
 		pm_runtime_disable(dev);
 		if (pm_runtime_status_suspended(dev)) {
 			pm_dev_dbg(dev, state, "direct-complete ");
+			dev->power.is_suspended = true;
 			goto Complete;
 		}
 
@@ -1738,6 +1780,7 @@ int dpm_suspend(pm_message_t state)
 
 		list_move(&dev->power.entry, &dpm_suspended_list);
 
+		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend))
 			continue;
 
@@ -1769,6 +1812,46 @@ int dpm_suspend(pm_message_t state)
 	return error;
 }
 
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+	struct device_link *link;
+	bool ret = true;
+	int idx;
+
+	/*
+	 * The "smart suspend" feature is enabled for devices whose drivers ask
+	 * for it and for devices without PM callbacks.
+	 *
+	 * However, if "smart suspend" is not enabled for the device's parent
+	 * or any of its suppliers that take runtime PM into account, it cannot
+	 * be enabled for the device either.
+	 */
+	if (!dev->power.no_pm_callbacks &&
+	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+		return false;
+
+	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+		return false;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+			continue;
+
+		if (!dev_pm_smart_suspend(link->supplier) &&
+		    !pm_runtime_blocked(link->supplier)) {
+			ret = false;
+			break;
+		}
+	}
+
+	device_links_read_unlock(idx);
+
+	return ret;
+}
+
 /**
  * device_prepare - Prepare a device for system power transition.
  * @dev: Device to handle.
@@ -1780,6 +1863,7 @@ int dpm_suspend(pm_message_t state)
 static int device_prepare(struct device *dev, pm_message_t state)
 {
 	int (*callback)(struct device *) = NULL;
+	bool smart_suspend;
 	int ret = 0;
 
 	/*
@@ -1789,6 +1873,13 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	 * it again during the complete phase.
 	 */
 	pm_runtime_get_noresume(dev);
+	/*
+	 * If runtime PM is disabled for the device at this point and it has
+	 * never been enabled so far, it should not be enabled until this system
+	 * suspend-resume cycle is complete, so prepare to trigger a warning on
+	 * subsequent attempts to enable it.
+	 */
+	smart_suspend = !pm_runtime_block_if_disabled(dev);
 
 	if (dev->power.syscore)
 		return 0;
@@ -1823,6 +1914,13 @@ unlock:
 		pm_runtime_put(dev);
 		return ret;
 	}
+	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
+	if (smart_suspend)
+		smart_suspend = device_prepare_smart_suspend(dev);
+
+	spin_lock_irq(&dev->power.lock);
+
+	dev->power.smart_suspend = smart_suspend;
 	/*
 	 * A positive return value from ->prepare() means "this device appears
 	 * to be runtime-suspended and its state is fine, so if it really is
@@ -1830,11 +1928,12 @@ unlock:
 	 * will do the same thing with all of its descendants". This only
 	 * applies to suspend transitions, however.
 	 */
-	spin_lock_irq(&dev->power.lock);
 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
 		(ret > 0 || dev->power.no_pm_callbacks) &&
 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
 	spin_unlock_irq(&dev->power.lock);
+
 	return 0;
 }
@@ -1998,6 +2097,5 @@ void device_pm_check_callbacks(struct device *dev)
 
 bool dev_pm_skip_suspend(struct device *dev)
 {
-	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
-		pm_runtime_status_suspended(dev);
+	return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
 }
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..c55a7c70bc1a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -448,8 +448,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 		retval = __rpm_callback(cb, dev);
 	}
 
-	dev->power.runtime_error = retval;
-	return retval != -EACCES ? retval : -EIO;
+	/*
+	 * Since -EACCES means that runtime PM is disabled for the given device,
+	 * it should not be returned by runtime PM callbacks. If it is returned
+	 * nevertheless, assume it to be a transient error and convert it to
+	 * -EAGAIN.
+	 */
+	if (retval == -EACCES)
+		retval = -EAGAIN;
+
+	if (retval != -EAGAIN && retval != -EBUSY)
+		dev->power.runtime_error = retval;
+
+	return retval;
 }
 
 /**
@@ -725,21 +736,18 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		dev->power.deferred_resume = false;
 		wake_up_all(&dev->power.wait_queue);
 
-		if (retval == -EAGAIN || retval == -EBUSY) {
-			dev->power.runtime_error = 0;
+		/*
+		 * On transient errors, if the callback routine failed an autosuspend,
+		 * and if the last_busy time has been updated so that there is a new
+		 * autosuspend expiration time, automatically reschedule another
+		 * autosuspend.
+		 */
+		if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+		    pm_runtime_autosuspend_expiration(dev) != 0)
+			goto repeat;
+
+		pm_runtime_cancel_pending(dev);
 
-			/*
-			 * If the callback routine failed an autosuspend, and
-			 * if the last_busy time has been updated so that there
-			 * is a new autosuspend expiration time, automatically
-			 * reschedule another autosuspend.
-			 */
-			if ((rpmflags & RPM_AUTO) &&
-			    pm_runtime_autosuspend_expiration(dev) != 0)
-				goto repeat;
-		} else {
-			pm_runtime_cancel_pending(dev);
-		}
 		goto out;
 	}
@@ -1003,7 +1011,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 	 * If 'expires' is after the current time, we've been called
 	 * too early.
 	 */
-	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+	if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -1460,20 +1468,31 @@ int pm_runtime_barrier(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
 
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+	bool ret;
+
+	spin_lock_irq(&dev->power.lock);
+
+	ret = !pm_runtime_enabled(dev);
+	if (ret && dev->power.last_status == RPM_INVALID)
+		dev->power.last_status = RPM_BLOCKED;
+
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.last_status == RPM_BLOCKED)
+		dev->power.last_status = RPM_INVALID;
+
+	spin_unlock_irq(&dev->power.lock);
+}
+
 void __pm_runtime_disable(struct device *dev, bool check_resume)
 {
 	spin_lock_irq(&dev->power.lock);
@@ -1532,6 +1551,10 @@ void pm_runtime_enable(struct device *dev)
 	if (--dev->power.disable_depth > 0)
 		goto out;
 
+	if (dev->power.last_status == RPM_BLOCKED) {
+		dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+		dump_stack();
+	}
 	dev->power.last_status = RPM_INVALID;
 	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
 
@@ -1545,6 +1568,32 @@ out:
 }
 EXPORT_SYMBOL_GPL(pm_runtime_enable);
 
+static void pm_runtime_set_suspended_action(void *data)
+{
+	pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+	int err;
+
+	err = pm_runtime_set_active(dev);
+	if (err)
+		return err;
+
+	err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+	if (err)
+		return err;
+
+	return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
 static void pm_runtime_disable_action(void *data)
 {
 	pm_runtime_dont_use_autosuspend(data);
@@ -1567,6 +1616,24 @@ int devm_pm_runtime_enable(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
 
+static void pm_runtime_put_noidle_action(void *data)
+{
+	pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+	pm_runtime_get_noresume(dev);
+
+	return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
 /**
  * pm_runtime_forbid - Block runtime PM of a device.
  * @dev: Device to handle.
@@ -1764,8 +1831,8 @@ void pm_runtime_init(struct device *dev)
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
-	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	dev->power.suspend_timer.function = pm_suspend_timer_fn;
+	hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+		      HRTIMER_MODE_ABS);
 
 	init_waitqueue_head(&dev->power.wait_queue);
 }
@@ -1874,7 +1941,7 @@ void pm_runtime_drop_link(struct device_link *link)
 	pm_request_idle(link->supplier);
 }
 
-static bool pm_runtime_need_not_resume(struct device *dev)
+bool pm_runtime_need_not_resume(struct device *dev)
 {
 	return atomic_read(&dev->power.usage_count) <= 1 &&
 		(atomic_read(&dev->power.child_count) == 0 ||
@@ -1959,7 +2026,7 @@ int pm_runtime_force_resume(struct device *dev)
 	int (*callback)(struct device *);
 	int ret = 0;
 
-	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+	if (!dev->power.needs_force_resume)
 		goto out;
 
 	/*
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f8163b559bf9..f84018125b46 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
 #include <linux/export.h>
 #include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
 #include <linux/atomic.h>
 #include <linux/jiffies.h>
 #include "power.h"
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5a5a9e978e85..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
 
+static void devm_pm_clear_wake_irq(void *dev)
+{
+	dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ *
+ * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+	int ret;
+
+	ret = dev_pm_set_wake_irq(dev, irq);
+	if (ret)
+		return ret;
+
+	return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
+
 /**
  * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
  * @irq: Device specific dedicated wake-up interrupt
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 752b417e8129..63bf914a4d44 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -197,7 +197,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
 	raw_spin_unlock_irqrestore(&events_lock, flags);
 	synchronize_srcu(&wakeup_srcu);
 
-	del_timer_sync(&ws->timer);
+	timer_delete_sync(&ws->timer);
 	/*
 	 * Clear timer.function to make wakeup_source_not_registered() treat
 	 * this wakeup source as not registered.
@@ -613,7 +613,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
 	spin_lock_irqsave(&ws->lock, flags);
 
 	wakeup_source_report_event(ws, false);
-	del_timer(&ws->timer);
+	timer_delete(&ws->timer);
 	ws->timer_expires = 0;
 
 	spin_unlock_irqrestore(&ws->lock, flags);
@@ -693,7 +693,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
 		ws->max_time = duration;
 
 	ws->last_time = now;
-	del_timer(&ws->timer);
+	timer_delete(&ws->timer);
 	ws->timer_expires = 0;
 
 	if (ws->autosleep_enabled)
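Note on the main.c changes: device_prepare() now computes a per-device power.smart_suspend flag (via device_prepare_smart_suspend() above), and dev_pm_skip_suspend() tests that flag rather than re-checking DPM_FLAG_SMART_SUSPEND directly. The driver-side opt-in is unchanged; a minimal sketch of it, with hypothetical baz_* names that are not part of this patch:

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int baz_probe(struct platform_device *pdev)
{
	/*
	 * Opt in to "smart suspend": if the device is runtime-suspended
	 * when system suspend starts, the core may skip the driver's
	 * system-wide suspend callbacks for it.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);

	return devm_pm_runtime_enable(&pdev->dev);
}

static int baz_suspend_late(struct device *dev)
{
	/* True when the core decided to leave the suspended device alone. */
	if (dev_pm_skip_suspend(dev))
		return 0;

	/* ... power the hardware down here ... */
	return 0;
}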
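The two devres helpers added to runtime.c pair naturally in a probe path. A minimal sketch, assuming a hypothetical bar driver whose hardware is already powered up when probe runs:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int bar_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Record the device as RPM_ACTIVE and enable runtime PM; both steps
	 * are undone automatically on driver detach (the devres actions run
	 * pm_runtime_set_suspended() and the disable for us).
	 */
	ret = devm_pm_runtime_set_active_enabled(&pdev->dev);
	if (ret)
		return ret;

	/*
	 * Hold a usage-count reference for the lifetime of the binding; the
	 * matching pm_runtime_put_noidle() also runs automatically on detach.
	 */
	return devm_pm_runtime_get_noresume(&pdev->dev);
}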
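Likewise, devm_pm_set_wake_irq() removes the need to call dev_pm_clear_wake_irq() on error and remove paths. A minimal usage sketch, assuming a hypothetical platform driver (foo_* names and the IRQ index are illustrative) and that the declaration sits next to dev_pm_set_wake_irq() in <linux/pm_wakeirq.h>:

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* Mark the device wakeup-capable before attaching the wake IRQ. */
	device_init_wakeup(&pdev->dev, true);

	/*
	 * Device-managed: the wake IRQ is cleared automatically on driver
	 * detach, so no explicit cleanup in a remove() callback is needed.
	 */
	return devm_pm_set_wake_irq(&pdev->dev, irq);
}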