Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--  drivers/base/power/main.c  |  365
1 file changed, 298 insertions(+), 67 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c8b0a9e29ed8..dbf5456cd891 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,8 +63,23 @@ static LIST_HEAD(dpm_noirq_list);
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
 static int async_error;
 
+/**
+ * pm_hibernate_is_recovering - Check if recovering from a hibernation error.
+ *
+ * Used to query whether dev_pm_ops.thaw() is being called for the normal
+ * hibernation case or to recover from an error.
+ *
+ * Return: true for the error case, false for the normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+        return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
 static const char *pm_verb(int event)
 {
         switch (event) {
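For context, dev_pm_ops.thaw() runs both in the normal hibernation flow (after the image has been created) and when creating or restoring the image fails, and drivers previously had no way to tell the two apart. A minimal sketch of a caller, assuming a hypothetical foo driver and assuming the new helper is declared in <linux/suspend.h> — the foo_* names are illustrative, and only pm_hibernate_is_recovering() comes from this patch:

    #include <linux/device.h>
    #include <linux/pm.h>
    #include <linux/suspend.h>

    struct foo_priv;
    static int foo_full_restart(struct foo_priv *priv);  /* hypothetical */
    static int foo_reenable_io(struct foo_priv *priv);   /* hypothetical */

    static int foo_thaw(struct device *dev)
    {
            struct foo_priv *priv = dev_get_drvdata(dev);

            /* Error case: hibernation is being aborted, so bring the
             * device fully back up instead of leaving it quiesced. */
            if (pm_hibernate_is_recovering())
                    return foo_full_restart(priv);

            /* Normal case: the image has been created. */
            return foo_reenable_io(priv);
    }

    static const struct dev_pm_ops foo_pm_ops = {
            .thaw = foo_thaw,
    };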
@@ -512,7 +527,7 @@ struct dpm_watchdog {
  */
 static void dpm_watchdog_handler(struct timer_list *t)
 {
-        struct dpm_watchdog *wd = from_timer(wd, t, timer);
+        struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
         struct timer_list *timer = &wd->timer;
         unsigned int time_left;
@@ -560,7 +575,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
         struct timer_list *timer = &wd->timer;
 
         timer_delete_sync(timer);
-        destroy_timer_on_stack(timer);
+        timer_destroy_on_stack(timer);
 }
 #else
 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -597,8 +612,11 @@ static bool is_async(struct device *dev)
                 && !pm_trace_is_enabled();
 }
 
-static bool dpm_async_fn(struct device *dev, async_func_t func)
+static bool __dpm_async(struct device *dev, async_func_t func)
 {
+        if (dev->power.work_in_progress)
+                return true;
+
         if (!is_async(dev))
                 return false;
@@ -611,14 +629,57 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 
         put_device(dev);
 
+        return false;
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+        guard(mutex)(&async_wip_mtx);
+
+        return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+        guard(mutex)(&async_wip_mtx);
+
+        if (!__dpm_async(dev, fn))
+                dev->power.work_in_progress = false;
+
+        return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
         /*
-         * async_schedule_dev_nocall() above has returned false, so func() is
-         * not running and it is safe to update power.work_in_progress without
-         * extra synchronization.
+         * Prevent racing with dpm_clear_async_state() during initial list
+         * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+         * dpm_resume().
          */
-        dev->power.work_in_progress = false;
+        guard(mutex)(&dpm_list_mtx);
 
-        return false;
+        /*
+         * Start processing "async" children of the device unless it's been
+         * started already for them.
+         */
+        device_for_each_child(dev, func, dpm_async_with_cleanup);
+}
+
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+        struct device_link *link;
+        int idx;
+
+        dpm_async_resume_children(dev, func);
+
+        idx = device_links_read_lock();
+
+        /* Start processing the device's "async" consumers. */
+        list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+                        dpm_async_with_cleanup(link->consumer, func);
+
+        device_links_read_unlock(idx);
 }
 
 static void dpm_clear_async_state(struct device *dev)
@@ -627,6 +688,20 @@
         dev->power.work_in_progress = false;
 }
 
+static bool dpm_root_device(struct device *dev)
+{
+        lockdep_assert_held(&dpm_list_mtx);
+
+        /*
+         * Since this function is required to run under dpm_list_mtx, the
+         * list_empty() below will only return true if the device's list of
+         * consumers is actually empty before calling it.
+         */
+        return !dev->parent && list_empty(&dev->links.suppliers);
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
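Both dpm_async_fn() and dpm_async_with_cleanup() above use guard(mutex) from <linux/cleanup.h>: the guard acquires the mutex at its declaration and releases it automatically on every path out of the scope, which is what makes the early returns safe. A minimal sketch of the pattern, with illustrative example_* names:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_mtx);
    static int example_count;

    static int example_increment(void)
    {
            guard(mutex)(&example_mtx);     /* mutex_lock(&example_mtx) */

            if (example_count < 0)
                    return -EINVAL;         /* unlocked on this return */

            return ++example_count;         /* and on this one */
    }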
@@ -706,10 +781,12 @@
 Out:
         TRACE_RESUME(error);
 
         if (error) {
-                async_error = error;
+                WRITE_ONCE(async_error, error);
                 dpm_save_failed_dev(dev_name(dev));
                 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
         }
+
+        dpm_async_resume_subordinate(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -733,19 +810,20 @@
         mutex_lock(&dpm_list_mtx);
 
         /*
-         * Trigger the resume of "async" devices upfront so they don't have to
-         * wait for the "non-async" ones they don't depend on.
+         * Start processing "async" root devices upfront so they don't wait for
+         * the "sync" devices they don't depend on.
          */
         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
                 dpm_clear_async_state(dev);
-                dpm_async_fn(dev, async_resume_noirq);
+                if (dpm_root_device(dev))
+                        dpm_async_with_cleanup(dev, async_resume_noirq);
         }
 
         while (!list_empty(&dpm_noirq_list)) {
                 dev = to_device(dpm_noirq_list.next);
                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
-                if (!dev->power.work_in_progress) {
+                if (!dpm_async_fn(dev, async_resume_noirq)) {
                         get_device(dev);
 
                         mutex_unlock(&dpm_list_mtx);
@@ -760,7 +838,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);
         async_synchronize_full();
         dpm_show_time(starttime, state, 0, "noirq");
-        if (async_error)
+        if (READ_ONCE(async_error))
                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 
         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
@@ -781,6 +859,8 @@ void dpm_resume_noirq(pm_message_t state)
         device_wakeup_disarm_wake_irqs();
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie);
+
 /**
  * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
@@ -844,10 +924,12 @@
 Out:
         complete_all(&dev->power.completion);
 
         if (error) {
-                async_error = error;
+                WRITE_ONCE(async_error, error);
                 dpm_save_failed_dev(dev_name(dev));
                 pm_dev_err(dev, state, async ? " async early" : " early", error);
         }
+
+        dpm_async_resume_subordinate(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -875,19 +957,20 @@ void dpm_resume_early(pm_message_t state)
         mutex_lock(&dpm_list_mtx);
 
         /*
-         * Trigger the resume of "async" devices upfront so they don't have to
-         * wait for the "non-async" ones they don't depend on.
+         * Start processing "async" root devices upfront so they don't wait for
+         * the "sync" devices they don't depend on.
          */
         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
                 dpm_clear_async_state(dev);
-                dpm_async_fn(dev, async_resume_early);
+                if (dpm_root_device(dev))
+                        dpm_async_with_cleanup(dev, async_resume_early);
         }
 
         while (!list_empty(&dpm_late_early_list)) {
                 dev = to_device(dpm_late_early_list.next);
                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
-                if (!dev->power.work_in_progress) {
+                if (!dpm_async_fn(dev, async_resume_early)) {
                         get_device(dev);
 
                         mutex_unlock(&dpm_list_mtx);
@@ -902,7 +985,7 @@ void dpm_resume_early(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);
         async_synchronize_full();
         dpm_show_time(starttime, state, 0, "early");
-        if (async_error)
+        if (READ_ONCE(async_error))
                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 
         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
@@ -919,6 +1002,8 @@ void dpm_resume_start(pm_message_t state)
 }
 EXPORT_SYMBOL_GPL(dpm_resume_start);
 
+static void async_resume(void *data, async_cookie_t cookie);
+
 /**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
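From here on, async_error is written and read by async handlers running concurrently, so plain accesses are replaced with WRITE_ONCE()/READ_ONCE(). Those do not order anything by themselves; they only keep the compiler from tearing, fusing, or caching the accesses, and the authoritative read still happens after async_synchronize_full() has drained all handlers. A minimal sketch of this latching pattern, with illustrative names:

    #include <linux/compiler.h>

    static int shared_error;

    /* May run concurrently from several async handlers; the first
     * nonzero value (or any later one) is latched for the caller. */
    static void note_error(int error)
    {
            if (error)
                    WRITE_ONCE(shared_error, error);
    }

    /* Polled by other handlers so they can bail out early. */
    static bool should_bail_out(void)
    {
            return READ_ONCE(shared_error) != 0;
    }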
@@ -941,6 +1026,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
         if (!dev->power.is_suspended)
                 goto Complete;
 
+        dev->power.is_suspended = false;
+
         if (dev->power.direct_complete) {
                 /*
                  * Allow new children to be added under the device after this
@@ -1003,7 +1090,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 End:
         error = dpm_run_callback(callback, dev, state, info);
-        dev->power.is_suspended = false;
 
         device_unlock(dev);
         dpm_watchdog_clear(&wd);
@@ -1014,10 +1100,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
         TRACE_RESUME(error);
 
         if (error) {
-                async_error = error;
+                WRITE_ONCE(async_error, error);
                 dpm_save_failed_dev(dev_name(dev));
                 pm_dev_err(dev, state, async ? " async" : "", error);
         }
+
+        dpm_async_resume_subordinate(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
@@ -1041,7 +1129,6 @@ void dpm_resume(pm_message_t state)
         ktime_t starttime = ktime_get();
 
         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
-        might_sleep();
 
         pm_transition = state;
         async_error = 0;
@@ -1049,19 +1136,20 @@ void dpm_resume(pm_message_t state)
         mutex_lock(&dpm_list_mtx);
 
         /*
-         * Trigger the resume of "async" devices upfront so they don't have to
-         * wait for the "non-async" ones they don't depend on.
+         * Start processing "async" root devices upfront so they don't wait for
+         * the "sync" devices they don't depend on.
          */
         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                 dpm_clear_async_state(dev);
-                dpm_async_fn(dev, async_resume);
+                if (dpm_root_device(dev))
+                        dpm_async_with_cleanup(dev, async_resume);
         }
 
         while (!list_empty(&dpm_suspended_list)) {
                 dev = to_device(dpm_suspended_list.next);
                 list_move_tail(&dev->power.entry, &dpm_prepared_list);
 
-                if (!dev->power.work_in_progress) {
+                if (!dpm_async_fn(dev, async_resume)) {
                         get_device(dev);
 
                         mutex_unlock(&dpm_list_mtx);
@@ -1076,7 +1164,7 @@ void dpm_resume(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);
         async_synchronize_full();
         dpm_show_time(starttime, state, 0, NULL);
-        if (async_error)
+        if (READ_ONCE(async_error))
                 dpm_save_failed_step(SUSPEND_RESUME);
 
         cpufreq_resume();
@@ -1143,7 +1231,6 @@ void dpm_complete(pm_message_t state)
         struct list_head list;
 
         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
-        might_sleep();
 
         INIT_LIST_HEAD(&list);
         mutex_lock(&dpm_list_mtx);
@@ -1182,6 +1269,7 @@ void dpm_complete(pm_message_t state)
 void dpm_resume_end(pm_message_t state)
 {
         dpm_resume(state);
+        pm_restore_gfp_mask();
         dpm_complete(state);
 }
 EXPORT_SYMBOL_GPL(dpm_resume_end);
@@ -1189,6 +1277,82 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
 
 /*------------------------- Suspend routines -------------------------*/
 
+static bool dpm_leaf_device(struct device *dev)
+{
+        struct device *child;
+
+        lockdep_assert_held(&dpm_list_mtx);
+
+        child = device_find_any_child(dev);
+        if (child) {
+                put_device(child);
+
+                return false;
+        }
+
+        /*
+         * Since this function is required to run under dpm_list_mtx, the
+         * list_empty() below will only return true if the device's list of
+         * consumers is actually empty before calling it.
+         */
+        return list_empty(&dev->links.consumers);
+}
+
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+        guard(mutex)(&dpm_list_mtx);
+
+        /*
+         * If the device is suspended asynchronously and the parent's callback
+         * deletes both the device and the parent itself, the parent object may
+         * be freed while this function is running, so avoid that by checking
+         * if the device has been deleted already as the parent cannot be
+         * deleted before it.
+         */
+        if (!device_pm_initialized(dev))
+                return false;
+
+        /* Start processing the device's parent if it is "async". */
+        if (dev->parent)
+                dpm_async_with_cleanup(dev->parent, func);
+
+        return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+        struct device_link *link;
+        int idx;
+
+        if (!dpm_async_suspend_parent(dev, func))
+                return;
+
+        idx = device_links_read_lock();
+
+        /* Start processing the device's "async" suppliers. */
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+                        dpm_async_with_cleanup(link->supplier, func);
+
+        device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+        struct device *dev;
+
+        guard(mutex)(&async_wip_mtx);
+
+        list_for_each_entry_reverse(dev, device_list, power.entry) {
+                /*
+                 * In case the device is being waited for and async processing
+                 * has not started for it yet, let the waiters make progress.
+                 */
+                if (!dev->power.work_in_progress)
+                        complete_all(&dev->power.completion);
+        }
+}
+
 /**
  * resume_event - Return a "resume" message for given "suspend" sleep state.
  * @sleep_state: PM message representing a sleep state.
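Throughout these paths, the ordering between a device and its parent or supplier rests on the per-device completion: waiters block in dpm_wait_for_superior()/dpm_wait_for_subordinate(), and complete_all() releases all of them, which is also why dpm_async_suspend_complete_all() above can unblock waiters for devices whose async work never got started. A toy model of that handshake, with hypothetical node names:

    #include <linux/completion.h>

    struct node {
            struct completion done;
    };

    static void node_init(struct node *n)
    {
            init_completion(&n->done);
    }

    /* A node waits for its parent before resuming itself; complete_all()
     * then releases every current and future waiter on this node. */
    static void node_resume(struct node *n, struct node *parent)
    {
            if (parent)
                    wait_for_completion(&parent->done);

            /* ... run this node's resume callback here ... */

            complete_all(&n->done);
    }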
@@ -1226,6 +1390,8 @@ static void dpm_superior_set_must_resume(struct device *dev)
         device_links_read_unlock(idx);
 }
 
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  * @dev: Device to handle.
@@ -1235,7 +1401,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
         pm_callback_t callback = NULL;
         const char *info = NULL;
@@ -1246,7 +1412,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
         dpm_wait_for_subordinate(dev, async);
 
-        if (async_error)
+        if (READ_ONCE(async_error))
                 goto Complete;
 
         if (dev->power.syscore || dev->power.direct_complete)
@@ -1279,7 +1445,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 Run:
         error = dpm_run_callback(callback, dev, state, info);
         if (error) {
-                async_error = error;
+                WRITE_ONCE(async_error, error);
                 dpm_save_failed_dev(dev_name(dev));
                 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
                 goto Complete;
@@ -1304,7 +1470,11 @@ Skip:
 Complete:
         complete_all(&dev->power.completion);
         TRACE_SUSPEND(error);
-        return error;
+
+        if (error || READ_ONCE(async_error))
+                return;
+
+        dpm_async_suspend_superior(dev, async_suspend_noirq);
 }
 
 static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1318,7 +1488,8 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
 static int dpm_noirq_suspend_devices(pm_message_t state)
 {
         ktime_t starttime = ktime_get();
-        int error = 0;
+        struct device *dev;
+        int error;
 
         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1327,12 +1498,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
         mutex_lock(&dpm_list_mtx);
 
+        /*
+         * Start processing "async" leaf devices upfront so they don't need to
+         * wait for the "sync" devices they don't depend on.
+         */
+        list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+                dpm_clear_async_state(dev);
+                if (dpm_leaf_device(dev))
+                        dpm_async_with_cleanup(dev, async_suspend_noirq);
+        }
+
         while (!list_empty(&dpm_late_early_list)) {
-                struct device *dev = to_device(dpm_late_early_list.prev);
+                dev = to_device(dpm_late_early_list.prev);
 
                 list_move(&dev->power.entry, &dpm_noirq_list);
 
-                dpm_clear_async_state(dev);
                 if (dpm_async_fn(dev, async_suspend_noirq))
                         continue;
@@ -1340,22 +1520,28 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
                 mutex_unlock(&dpm_list_mtx);
 
-                error = device_suspend_noirq(dev, state, false);
+                device_suspend_noirq(dev, state, false);
 
                 put_device(dev);
 
                 mutex_lock(&dpm_list_mtx);
 
-                if (error || async_error)
+                if (READ_ONCE(async_error)) {
+                        dpm_async_suspend_complete_all(&dpm_late_early_list);
+                        /*
+                         * Move all devices to the target list to resume them
+                         * properly.
+                         */
+                        list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
                         break;
+                }
         }
 
         mutex_unlock(&dpm_list_mtx);
         async_synchronize_full();
-        if (!error)
-                error = async_error;
+        error = READ_ONCE(async_error);
 
         if (error)
                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
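On failure, the loop now leaves through a single path: waiters are released, and everything still pending is spliced onto the destination list so the subsequent resume pass walks one complete list. list_splice_init() moves the whole remainder in O(1) and reinitializes the source list to empty; a minimal sketch, with an illustrative helper name:

    #include <linux/list.h>

    static void abort_remaining(struct list_head *pending, struct list_head *done)
    {
            /* Joins all of @pending onto @done and empties @pending. */
            list_splice_init(pending, done);
    }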
@@ -1400,6 +1586,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
         spin_unlock_irq(&parent->power.lock);
 }
 
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend_late - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
@@ -1408,7 +1596,7 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
         pm_callback_t callback = NULL;
         const char *info = NULL;
@@ -1425,11 +1613,11 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
         dpm_wait_for_subordinate(dev, async);
 
-        if (async_error)
+        if (READ_ONCE(async_error))
                 goto Complete;
 
         if (pm_wakeup_pending()) {
-                async_error = -EBUSY;
+                WRITE_ONCE(async_error, -EBUSY);
                 goto Complete;
         }
@@ -1463,7 +1651,7 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
 Run:
         error = dpm_run_callback(callback, dev, state, info);
         if (error) {
-                async_error = error;
+                WRITE_ONCE(async_error, error);
                 dpm_save_failed_dev(dev_name(dev));
                 pm_dev_err(dev, state, async ? " async late" : " late", error);
                 goto Complete;
@@ -1476,7 +1664,11 @@ Skip:
 Complete:
         TRACE_SUSPEND(error);
         complete_all(&dev->power.completion);
-        return error;
+
+        if (error || READ_ONCE(async_error))
+                return;
+
+        dpm_async_suspend_superior(dev, async_suspend_late);
 }
 
 static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1494,7 +1686,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
 int dpm_suspend_late(pm_message_t state)
 {
         ktime_t starttime = ktime_get();
-        int error = 0;
+        struct device *dev;
+        int error;
 
         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1505,12 +1698,21 @@ int dpm_suspend_late(pm_message_t state)
         mutex_lock(&dpm_list_mtx);
 
+        /*
+         * Start processing "async" leaf devices upfront so they don't need to
+         * wait for the "sync" devices they don't depend on.
+         */
+        list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+                dpm_clear_async_state(dev);
+                if (dpm_leaf_device(dev))
+                        dpm_async_with_cleanup(dev, async_suspend_late);
+        }
+
         while (!list_empty(&dpm_suspended_list)) {
-                struct device *dev = to_device(dpm_suspended_list.prev);
+                dev = to_device(dpm_suspended_list.prev);
 
                 list_move(&dev->power.entry, &dpm_late_early_list);
 
-                dpm_clear_async_state(dev);
                 if (dpm_async_fn(dev, async_suspend_late))
                         continue;
@@ -1518,22 +1720,28 @@ int dpm_suspend_late(pm_message_t state)
                 mutex_unlock(&dpm_list_mtx);
 
-                error = device_suspend_late(dev, state, false);
+                device_suspend_late(dev, state, false);
 
                 put_device(dev);
 
                 mutex_lock(&dpm_list_mtx);
 
-                if (error || async_error)
+                if (READ_ONCE(async_error)) {
+                        dpm_async_suspend_complete_all(&dpm_suspended_list);
+                        /*
+                         * Move all devices to the target list to resume them
+                         * properly.
+                         */
+                        list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
                         break;
+                }
         }
 
         mutex_unlock(&dpm_list_mtx);
         async_synchronize_full();
-        if (!error)
-                error = async_error;
+        error = READ_ONCE(async_error);
 
         if (error) {
                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                 dpm_resume_early(resume_event(state));
@@ -1614,13 +1822,15 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
         device_links_read_unlock(idx);
 }
 
+static void async_suspend(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being suspended asynchronously.
  */
-static int device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
 {
         pm_callback_t callback = NULL;
         const char *info = NULL;
@@ -1632,7 +1842,7 @@ static void device_suspend(struct device *dev, pm_message_t state, bool async)
         dpm_wait_for_subordinate(dev, async);
 
-        if (async_error) {
+        if (READ_ONCE(async_error)) {
                 dev->power.direct_complete = false;
                 goto Complete;
         }
@@ -1652,7 +1862,7 @@ static void device_suspend(struct device *dev, pm_message_t state, bool async)
         if (pm_wakeup_pending()) {
                 dev->power.direct_complete = false;
-                async_error = -EBUSY;
+                WRITE_ONCE(async_error, -EBUSY);
                 goto Complete;
         }
@@ -1736,14 +1946,18 @@ static void device_suspend(struct device *dev, pm_message_t state, bool async)
 
 Complete:
         if (error) {
-                async_error = error;
+                WRITE_ONCE(async_error, error);
                 dpm_save_failed_dev(dev_name(dev));
                 pm_dev_err(dev, state, async ? " async" : "", error);
         }
 
         complete_all(&dev->power.completion);
         TRACE_SUSPEND(error);
-        return error;
+
+        if (error || READ_ONCE(async_error))
+                return;
+
+        dpm_async_suspend_superior(dev, async_suspend);
 }
 
 static void async_suspend(void *data, async_cookie_t cookie)
@@ -1761,7 +1975,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
 int dpm_suspend(pm_message_t state)
 {
         ktime_t starttime = ktime_get();
-        int error = 0;
+        struct device *dev;
+        int error;
 
         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
         might_sleep();
@@ -1774,12 +1989,21 @@ int dpm_suspend(pm_message_t state)
         mutex_lock(&dpm_list_mtx);
 
+        /*
+         * Start processing "async" leaf devices upfront so they don't need to
+         * wait for the "sync" devices they don't depend on.
+         */
+        list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+                dpm_clear_async_state(dev);
+                if (dpm_leaf_device(dev))
+                        dpm_async_with_cleanup(dev, async_suspend);
+        }
+
         while (!list_empty(&dpm_prepared_list)) {
-                struct device *dev = to_device(dpm_prepared_list.prev);
+                dev = to_device(dpm_prepared_list.prev);
 
                 list_move(&dev->power.entry, &dpm_suspended_list);
 
-                dpm_clear_async_state(dev);
                 if (dpm_async_fn(dev, async_suspend))
                         continue;
@@ -1787,22 +2011,28 @@ int dpm_suspend(pm_message_t state)
                 mutex_unlock(&dpm_list_mtx);
 
-                error = device_suspend(dev, state, false);
+                device_suspend(dev, state, false);
 
                 put_device(dev);
 
                 mutex_lock(&dpm_list_mtx);
 
-                if (error || async_error)
+                if (READ_ONCE(async_error)) {
+                        dpm_async_suspend_complete_all(&dpm_prepared_list);
+                        /*
+                         * Move all devices to the target list to resume them
+                         * properly.
+                         */
+                        list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
                         break;
+                }
         }
 
         mutex_unlock(&dpm_list_mtx);
         async_synchronize_full();
-        if (!error)
-                error = async_error;
+        error = READ_ONCE(async_error);
 
         if (error)
                 dpm_save_failed_step(SUSPEND_SUSPEND);
@@ -1836,7 +2066,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
         idx = device_links_read_lock();
 
         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
-                if (!(link->flags & DL_FLAG_PM_RUNTIME))
+                if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
                         continue;
 
                 if (!dev_pm_smart_suspend(link->supplier) &&
@@ -1947,7 +2177,6 @@ int dpm_prepare(pm_message_t state)
         int error = 0;
 
         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
-        might_sleep();
 
         /*
         * Give a chance for the known devices to complete their probes, before
@@ -2014,8 +2243,10 @@ int dpm_suspend_start(pm_message_t state)
         error = dpm_prepare(state);
         if (error)
                 dpm_save_failed_step(SUSPEND_PREPARE);
-        else
+        else {
+                pm_restrict_gfp_mask();
                 error = dpm_suspend(state);
+        }
 
         dpm_show_time(starttime, state, error, "start");
         return error;
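None of the async machinery applies to a device unless it is marked async-capable: is_async() checks dev->power.async_suspend, which drivers normally opt into at probe time. A minimal sketch, assuming a hypothetical foo_probe():

    #include <linux/device.h>

    static int foo_probe(struct device *dev)
    {
            /* Allow the PM core to suspend/resume this device out of
             * strict list order, subject to the parent/child and
             * device-link ordering enforced above. */
            device_enable_async_suspend(dev);

            /* ... normal probe work ... */
            return 0;
    }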