author     Matthew Auld <matthew.auld@intel.com>      2023-07-19 11:38:03 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>      2023-12-21 19:37:35 +0300
commit     a00b8f1aae43c46658de0f7f55d8a65acb002159 (patch)
tree       275ba77984d30eba9424ecf54c5a7b6ea24f35f6 /drivers/gpu/drm/xe/xe_pm.c
parent     09d88e3beb64b8d2e3043fef72dda0df62487e44 (diff)
drm/xe: fix xe_device_mem_access_get() races
It looks like there is at least one race here, given that the pm_runtime_suspended() check looks to return false if we are in the process of suspending the device (RPM_SUSPENDING vs RPM_SUSPENDED). We later also do xe_pm_runtime_get_if_active(), but since the device is suspending or has now suspended, this doesn't do anything either. Following from this we can potentially return from xe_device_mem_access_get() with the device suspended or about to be, leading to broken behaviour.

Attempt to fix this by always grabbing the runtime ref when our internal ref transitions from 0 -> 1. The hard part is then dealing with the runtime_pm callbacks also calling xe_device_mem_access_get() and deadlocking, which the pm_runtime_suspended() check prevented.

v2:
 - ct->lock looks to be primed with fs_reclaim, so holding that and then allocating memory will cause lockdep to complain. Now that we unconditionally grab the mem_access.lock around mem_access_{get,put}, we need to change the ordering with respect to grabbing the ct->lock, since some of the runtime_pm routines can allocate memory (or at least that's what lockdep seems to suggest). Hopefully not a big deal. It might be that there were already issues with this, just that the atomics were "hiding" the potential issues.
v3:
 - Use Thomas Hellström's idea of tracking the active task that is executing in the resume or suspend callback, in order to avoid recursive resume/suspend calls deadlocking on themselves.
 - Split out the ct->lock change.
v4:
 - Add smp_mb() around accessing the pm_callback_task for extra safety. (Thomas Hellström)
v5:
 - Clarify the kernel-doc for the mem_access.lock, given that it is quite strange in what it protects (data vs code). The real motivation is to aid lockdep. (Rodrigo Vivi)
v6:
 - Split out the lock change. We still want this as a lockdep aid but only for the xe_device_mem_access_get() path. Sticking a lock on the put() looks to be a no-go; also the runtime_put() there is always async.
 - Now that the lock is gone, move to atomics and rely on the pm code serialising multiple callers on the 0 -> 1 transition.
 - g2h_worker_func() looks to be the next issue, given that the suspend-resume callbacks are using CT, so try to handle that.
v7:
 - Add xe_device_mem_access_get_if_ongoing(), and use it in g2h_worker_func().
v8 (Anshuman):
 - Just always grab the rpm, instead of just on the 0 -> 1 transition, which is a lot clearer and simplifies the code quite a bit.
v9:
 - Make sure we also adjust the CT fast-path with if-active.

Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/258
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Anshuman Gupta <anshuman.gupta@intel.com>
Acked-by: Anshuman Gupta <anshuman.gupta@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
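The tricky part the changelog describes is letting the suspend/resume callbacks call back into xe_device_mem_access_get() without deadlocking. A minimal sketch of the consumer side is below; since this diffstat only covers xe_pm.c, the xe_device.c side shown here is an assumption reconstructed from the changelog (v8: always grab the rpm ref, v3: bail out when called from within the pm callback), not part of this patch:

void xe_device_mem_access_get(struct xe_device *xe)
{
	/*
	 * If we are the task currently executing the runtime suspend/resume
	 * callback (as recorded by xe_pm_write_callback_task() below), taking
	 * another rpm reference would recurse into ourselves and deadlock, so
	 * skip it; the device stays awake for the callback's duration anyway.
	 */
	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_pm_runtime_get(xe);			/* may synchronously resume */
	atomic_inc(&xe->mem_access.ref);	/* assumed atomic_t counter (v6) */
}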
Diffstat (limited to 'drivers/gpu/drm/xe/xe_pm.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c  68
1 file changed, 43 insertions, 25 deletions
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index f336aec7085d..04b995aa848f 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -155,37 +155,65 @@ void xe_pm_runtime_fini(struct xe_device *xe)
pm_runtime_forbid(dev);
}
+static void xe_pm_write_callback_task(struct xe_device *xe,
+ struct task_struct *task)
+{
+ WRITE_ONCE(xe->pm_callback_task, task);
+
+ /*
+ * Just in case it's somehow possible for our writes to be reordered to
+ * the extent that something else re-uses the task written in
+ * pm_callback_task. For example after returning from the callback, but
+ * before the reordered write that resets pm_callback_task back to NULL.
+ */
+ smp_mb(); /* pairs with xe_pm_read_callback_task */
+}
+
+struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
+{
+ smp_mb(); /* pairs with xe_pm_write_callback_task */
+
+ return READ_ONCE(xe->pm_callback_task);
+}
+
int xe_pm_runtime_suspend(struct xe_device *xe)
{
struct xe_gt *gt;
u8 id;
- int err;
+ int err = 0;
- if (xe->d3cold.allowed) {
- if (xe_device_mem_access_ongoing(xe))
- return -EBUSY;
+ if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
+ return -EBUSY;
+
+ /* Disable access_ongoing asserts and prevent recursive pm calls */
+ xe_pm_write_callback_task(xe, current);
+ if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
- return err;
+ goto out;
}
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err)
- return err;
+ goto out;
}
xe_irq_suspend(xe);
-
- return 0;
+out:
+ xe_pm_write_callback_task(xe, NULL);
+ return err;
}
int xe_pm_runtime_resume(struct xe_device *xe)
{
struct xe_gt *gt;
u8 id;
- int err;
+ int err = 0;
+
+ /* Disable access_ongoing asserts and prevent recursive pm calls */
+ xe_pm_write_callback_task(xe, current);
/*
* It can be possible that xe has allowed d3cold but other pcie devices
@@ -199,7 +227,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
for_each_gt(gt, xe, id) {
err = xe_pcode_init(gt);
if (err)
- return err;
+ goto out;
}
/*
@@ -208,7 +236,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
*/
err = xe_bo_restore_kernel(xe);
if (err)
- return err;
+ goto out;
}
xe_irq_resume(xe);
@@ -219,10 +247,11 @@ int xe_pm_runtime_resume(struct xe_device *xe)
if (xe->d3cold.allowed && xe->d3cold.power_lost) {
err = xe_bo_restore_user(xe);
if (err)
- return err;
+ goto out;
}
-
- return 0;
+out:
+ xe_pm_write_callback_task(xe, NULL);
+ return err;
}
int xe_pm_runtime_get(struct xe_device *xe)
@@ -236,19 +265,8 @@ int xe_pm_runtime_put(struct xe_device *xe)
return pm_runtime_put_autosuspend(xe->drm.dev);
}
-/* Return true if resume operation happened and usage count was increased */
-bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe)
-{
- /* In case we are suspended we need to immediately wake up */
- if (pm_runtime_suspended(xe->drm.dev))
- return !pm_runtime_resume_and_get(xe->drm.dev);
-
- return false;
-}
-
int xe_pm_runtime_get_if_active(struct xe_device *xe)
{
- WARN_ON(pm_runtime_suspended(xe->drm.dev));
return pm_runtime_get_if_active(xe->drm.dev, true);
}
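For contexts that must not trigger a synchronous resume (the CT fast path and g2h_worker_func() mentioned in v7/v9), a hedged sketch of the if-active variant might look like the following; only xe_pm_runtime_get_if_active() above comes from this patch, the wrapper body and the ref counter are assumptions based on the changelog:

bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
{
	bool active;

	/* Only take a reference if the device is already runtime-active */
	active = xe_pm_runtime_get_if_active(xe) > 0;
	if (active)
		atomic_inc(&xe->mem_access.ref);	/* assumed counter, as above */

	return active;
}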