Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/base/base.h                     |  8
-rw-r--r-- | drivers/base/power/main.c               | 32
-rw-r--r-- | drivers/base/power/runtime.c            |  3
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  2
4 files changed, 30 insertions, 15 deletions
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d9..700aecd22fd3 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev);
 void fw_devlink_drivers_done(void);
 void fw_devlink_probing_done(void);
 
+#define dev_for_each_link_to_supplier(__link, __dev) \
+	list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+				 device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+	list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+				 device_links_read_lock_held())
+
 /* device pm support */
 void device_pm_move_to_tail(struct device *dev);
 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2ea6e05e6ec9..b9a34c3425ec 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
 
 typedef int (*pm_callback_t)(struct device *);
 
-#define list_for_each_entry_rcu_locked(pos, head, member) \
-	list_for_each_entry_rcu(pos, head, member, \
-			device_links_read_lock_held())
-
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
 	 * callbacks freeing the link objects for the links in the list we're
 	 * walking.
	 */
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->supplier, async);
 
@@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
 	 * continue instead of trying to continue in parallel with its
 	 * unregistration).
	 */
-	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+	dev_for_each_link_to_consumer(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->consumer, async);
 
@@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
 	idx = device_links_read_lock();
 
 	/* Start processing the device's "async" consumers. */
-	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+	dev_for_each_link_to_consumer(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_async_with_cleanup(link->consumer, func);
 
@@ -724,8 +720,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;
 
-	if (!dev->power.is_noirq_suspended)
+	if (!dev->power.is_noirq_suspended) {
+		/*
+		 * This means that system suspend has been aborted in the noirq
+		 * phase before invoking the noirq suspend callback for the
+		 * device, so if device_suspend_late() has left it in suspend,
+		 * device_resume_early() should leave it in suspend either in
+		 * case the early resume of it depends on the noirq resume that
+		 * has not run.
+		 */
+		if (dev_pm_skip_suspend(dev))
+			dev->power.must_resume = false;
+
 		goto Out;
+	}
 
 	if (!dpm_wait_for_superior(dev, async))
 		goto Out;
@@ -1330,7 +1338,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
 	idx = device_links_read_lock();
 
 	/* Start processing the device's "async" suppliers. */
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_async_with_cleanup(link->supplier, func);
 
@@ -1384,7 +1392,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+	dev_for_each_link_to_supplier(link, dev)
 		link->supplier->power.must_resume = true;
 
 	device_links_read_unlock(idx);
@@ -1813,7 +1821,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+	dev_for_each_link_to_supplier(link, dev) {
 		spin_lock_irq(&link->supplier->power.lock);
 		link->supplier->power.direct_complete = false;
 		spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2073,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+	dev_for_each_link_to_supplier(link, dev) {
 		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
 			continue;
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3e84dc4122de..7420b9851fe0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev)
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
-				device_links_read_lock_held())
+	dev_for_each_link_to_supplier(link, dev)
 		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
 			link->supplier_preactivated = true;
 			pm_runtime_get_sync(link->supplier);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..dcea66aadfa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2665,7 +2665,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
 	/* do not resume device if it's normal hibernation */
-	if (!pm_hibernate_is_recovering())
+	if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
 		return 0;
 
 	return amdgpu_device_resume(drm_dev, true);
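
For reference, the two macros added to drivers/base/base.h only wrap list_for_each_entry_srcu() with device_links_read_lock_held(), so every converted call site keeps the same locking pattern as before. Below is a minimal sketch of that pattern, modeled on the sites changed in drivers/base/power/main.c; the function name pm_sketch_walk_suppliers and the dev_info() message are illustrative only and not part of this diff (the code would have to live inside drivers/base, where base.h and the device-links lock helpers are visible).

/* Sketch: walk a device's supplier links under the device-links SRCU lock. */
static void pm_sketch_walk_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	/* The macro expects the device-links SRCU read lock to be held. */
	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dev_info(link->supplier, "supplier link is active\n");

	device_links_read_unlock(idx);
}

As in the converted sites, the caller still brackets the walk with device_links_read_lock()/device_links_read_unlock(); the macros merely fold the device_links_read_lock_held() cond argument into list_for_each_entry_srcu() so it no longer has to be repeated at each call site.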