-rw-r--r--  Documentation/ABI/testing/sysfs-devices-power    8
-rw-r--r--  drivers/base/base.h                              8
-rw-r--r--  drivers/base/power/main.c                       32
-rw-r--r--  drivers/base/power/runtime.c                     3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c          2
-rw-r--r--  include/linux/suspend.h                          6
-rw-r--r--  kernel/power/hibernate.c                        39
-rw-r--r--  kernel/power/process.c                           1
-rw-r--r--  kernel/power/snapshot.c                          2
-rw-r--r--  kernel/power/suspend.c                           1
-rw-r--r--  kernel/power/swap.c                              6
11 files changed, 71 insertions(+), 37 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index e4ec5de9a5dd..9bf7c8a267c5 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -274,15 +274,15 @@ What: /sys/devices/.../power/runtime_active_time
Date: Jul 2010
Contact: Arjan van de Ven <arjan@linux.intel.com>
Description:
- Reports the total time that the device has been active.
- Used for runtime PM statistics.
+ Reports the total time that the device has been active, in
+ milliseconds. Used for runtime PM statistics.
What: /sys/devices/.../power/runtime_suspended_time
Date: Jul 2010
Contact: Arjan van de Ven <arjan@linux.intel.com>
Description:
- Reports total time that the device has been suspended.
- Used for runtime PM statistics.
+ Reports total time that the device has been suspended, in
+ milliseconds. Used for runtime PM statistics.
What: /sys/devices/.../power/runtime_usage
Date: Apr 2010
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d9..700aecd22fd3 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
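Note: the two iterator helpers added above wrap list_for_each_entry_srcu() over the SRCU-protected supplier and consumer link lists, passing device_links_read_lock_held() as the lockdep condition; callers still need to take the device-links read lock themselves. A minimal usage sketch follows (walk_suppliers() is an illustrative name, not part of this patch):

	static void walk_suppliers(struct device *dev)
	{
		struct device_link *link;
		int idx;

		/* The helpers assume this SRCU read lock is held. */
		idx = device_links_read_lock();

		dev_for_each_link_to_supplier(link, dev)
			if (READ_ONCE(link->status) != DL_STATE_DORMANT)
				dev_info(dev, "supplier: %s\n",
					 dev_name(link->supplier));

		device_links_read_unlock(idx);
	}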
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2ea6e05e6ec9..b9a34c3425ec 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
@@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
@@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -724,8 +720,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should also leave it in suspend, in
+ * case its early resume depends on the noirq resume that has
+ * not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
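Note: the new branch relies on dev_pm_skip_suspend() to decide when clearing must_resume is safe. For reference, that helper (unchanged by this patch) is essentially the check below:

	bool dev_pm_skip_suspend(struct device *dev)
	{
		/*
		 * True when the driver opted into smart suspend and the
		 * device was already runtime-suspended when system
		 * suspend started.
		 */
		return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		       pm_runtime_status_suspended(dev);
	}

so must_resume is only cleared for devices whose late suspend was skipped because they were already runtime-suspended, and which can therefore stay suspended across the aborted transition.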
@@ -1330,7 +1338,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
@@ -1384,7 +1392,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1813,7 +1821,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2073,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3e84dc4122de..7420b9851fe0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
+ dev_for_each_link_to_supplier(link, dev)
if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..dcea66aadfa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2665,7 +2665,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* do not resume device if it's normal hibernation */
- if (!pm_hibernate_is_recovering())
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
return 0;
return amdgpu_device_resume(drm_dev, true);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 317ae31e89b3..b02876f1ae38 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -418,6 +418,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
}
#endif /* CONFIG_HIBERNATION */
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
+bool pm_hibernation_mode_is_suspend(void);
+#else
+static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
+#endif
+
int arch_resume_nosmt(void);
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 2f66ab453823..14e85ff23551 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -80,6 +80,17 @@ static const struct platform_hibernation_ops *hibernation_ops;
static atomic_t hibernate_atomic = ATOMIC_INIT(1);
+#ifdef CONFIG_SUSPEND
+/**
+ * pm_hibernation_mode_is_suspend - Check if the hibernation mode is set to suspend
+ */
+bool pm_hibernation_mode_is_suspend(void)
+{
+ return hibernation_mode == HIBERNATION_SUSPEND;
+}
+EXPORT_SYMBOL_GPL(pm_hibernation_mode_is_suspend);
+#endif
+
bool hibernate_acquire(void)
{
return atomic_add_unless(&hibernate_atomic, -1, 0);
@@ -695,19 +706,13 @@ static void power_down(void)
#ifdef CONFIG_SUSPEND
if (hibernation_mode == HIBERNATION_SUSPEND) {
+ pm_restore_gfp_mask();
error = suspend_devices_and_enter(mem_sleep_current);
- if (error) {
- hibernation_mode = hibernation_ops ?
- HIBERNATION_PLATFORM :
- HIBERNATION_SHUTDOWN;
- } else {
- /* Restore swap signature. */
- error = swsusp_unmark();
- if (error)
- pr_err("Swap will be unusable! Try swapon -a.\n");
+ if (!error)
+ goto exit;
- return;
- }
+ hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM :
+ HIBERNATION_SHUTDOWN;
}
#endif
@@ -718,10 +723,9 @@ static void power_down(void)
case HIBERNATION_PLATFORM:
error = hibernation_platform_enter();
if (error == -EAGAIN || error == -EBUSY) {
- swsusp_unmark();
events_check_enabled = false;
pr_info("Wakeup event detected during hibernation, rolling back.\n");
- return;
+ goto exit;
}
fallthrough;
case HIBERNATION_SHUTDOWN:
@@ -740,6 +744,15 @@ static void power_down(void)
pr_crit("Power down manually\n");
while (1)
cpu_relax();
+
+exit:
+ /* Match the pm_restore_gfp_mask() call in hibernate(). */
+ pm_restrict_gfp_mask();
+
+ /* Restore swap signature. */
+ error = swsusp_unmark();
+ if (error)
+ pr_err("Swap will be unusable! Try swapon -a.\n");
}
static int load_image_and_restore(void)
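Note: taken together, the hibernate.c hunks rework power_down() so that both wakeup paths (resume from suspend-to-RAM and a wakeup event during platform hibernation) funnel through one exit label that rebalances the GFP mask and restores the swap signature. A condensed sketch of the resulting flow, not the literal function body:

	static void power_down(void)
	{
		if (hibernation_mode == HIBERNATION_SUSPEND) {
			pm_restore_gfp_mask();
			if (!suspend_devices_and_enter(mem_sleep_current))
				goto exit;	/* woke up from suspend */
			/* Suspend failed: fall back to platform/shutdown. */
			hibernation_mode = hibernation_ops ?
				HIBERNATION_PLATFORM : HIBERNATION_SHUTDOWN;
		}

		/* ... platform enter (its -EAGAIN/-EBUSY wakeup path also
		 * jumps to exit), reboot, or shutdown ... */
		return;

	exit:
		/* Match the pm_restore_gfp_mask() call in hibernate(). */
		pm_restrict_gfp_mask();

		/* Restore the swap signature. */
		if (swsusp_unmark())
			pr_err("Swap will be unusable! Try swapon -a.\n");
	}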
diff --git a/kernel/power/process.c b/kernel/power/process.c
index dc0dfc349f22..8ff68ebaa1e0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,6 @@ int freeze_processes(void)
if (!pm_freezing)
static_branch_inc(&freezer_active);
- pm_wakeup_clear(0);
pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 501df0676a61..645f42e40478 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -363,7 +363,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
*
* One radix tree is represented by one struct mem_zone_bm_rtree. There are
* two linked lists for the nodes of the tree, one for the inner nodes and
- * one for the leave nodes. The linked leave nodes are used for fast linear
+ * one for the leaf nodes. The linked leaf nodes are used for fast linear
* access of the memory bitmap.
*
* The struct rtree_node represents one node of the radix tree.
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index b4ca17c2fecf..4bb4686c1c08 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -595,6 +595,7 @@ static int enter_state(suspend_state_t state)
}
pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
+ pm_wakeup_clear(0);
pm_suspend_clear_flags();
error = suspend_prepare(state);
if (error)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index ad13c461b657..0beff7eeaaba 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -712,7 +712,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
- data = vzalloc(array_size(nr_threads, sizeof(*data)));
+ data = vcalloc(nr_threads, sizeof(*data));
if (!data) {
pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
@@ -1225,14 +1225,14 @@ static int load_compressed_image(struct swap_map_handle *handle,
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
- page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
+ page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
if (!page) {
pr_err("Failed to allocate %s page\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
- data = vzalloc(array_size(nr_threads, sizeof(*data)));
+ data = vcalloc(nr_threads, sizeof(*data));
if (!data) {
pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
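Note: the swap.c hunks replace open-coded vzalloc(array_size(...)) and vmalloc(array_size(...)) with the dedicated array allocators, which perform the same multiplication-overflow check internally. The general pattern (buf and n are illustrative names):

	/* Zeroed array, overflow-checked: replaces vzalloc(array_size(n, sizeof(*buf))). */
	buf = vcalloc(n, sizeof(*buf));

	/* Uninitialized array, overflow-checked: replaces vmalloc(array_size(n, sizeof(*buf))). */
	buf = vmalloc_array(n, sizeof(*buf));

vcalloc() keeps vzalloc()'s zeroing semantics and vmalloc_array() keeps vmalloc()'s, so neither conversion changes behavior.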