author    | Thomas Hellström <thomas.hellstrom@linux.intel.com> | 2024-09-03 12:42:32 +0300
committer | Thomas Hellström <thomas.hellstrom@linux.intel.com> | 2024-09-04 10:28:09 +0300
commit    | 34bb7b813ab398106f700b0a6b218509bb0b904c (patch)
tree      | 90af611bdf03dd54b0c47eed56302fb67f40d59e
parent    | 8da19441d0a02b53e362df81843bb20db3a8006a (diff)
download  | linux-34bb7b813ab398106f700b0a6b218509bb0b904c.tar.xz
drm/xe: Use xe_pm_runtime_get in xe_bo_move() if reclaim-safe.
xe_bo_move() might be called in the TTM swapout path from validation
by another TTM device. If so, we are not likely to hold an RPM
reference. So iff xe_pm_runtime_get() is safe to call from reclaim,
use it instead of xe_pm_runtime_get_noresume().

Strictly speaking, this is currently needed only if handle_system_ccs
is true, but use xe_pm_runtime_get() whenever possible to increase
test coverage.

At the same time, warn if handle_system_ccs is true and we can't
call xe_pm_runtime_get() from reclaim context. This warning will
likely trip if someone tries to enable SRIOV on LNL without fixing
Xe SRIOV runtime resume / suspend.
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240903094232.166342-1-thomas.hellstrom@linux.intel.com
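
For readers who want the gist before wading into the diff below, the change to
xe_bo_move() boils down to the decision sketched here. This is a condensed,
illustrative sketch rather than the literal hunk: the surrounding migration
code is omitted, the logic is pulled out into a hypothetical helper
(xe_bo_move_get_rpm() does not exist in the tree), and the handle_system_ccs
parameter stands in for the local variable of the same name in xe_bo_move().

#include <drm/drm_print.h>

#include "xe_device.h"
#include "xe_pm.h"

/* Condensed sketch of the RPM handling this patch adds to xe_bo_move(). */
static void xe_bo_move_get_rpm(struct xe_device *xe, bool handle_system_ccs)
{
	if (xe_rpm_reclaim_safe(xe)) {
		/*
		 * We may have been entered through another TTM device's
		 * swapout path, i.e. from reclaim. On this configuration a
		 * full runtime resume is still reclaim-safe, so take a
		 * resuming reference unconditionally.
		 */
		xe_pm_runtime_get(xe);
	} else {
		/*
		 * Resuming from reclaim is not safe on this configuration.
		 * The resume is strictly needed only when handle_system_ccs
		 * is set, so warn in that case and fall back to only
		 * bumping the reference count.
		 */
		drm_WARN_ON(&xe->drm, handle_system_ccs);
		xe_pm_runtime_get_noresume(xe);
	}
}

The reclaim-safe case is exactly what xe_rpm_reclaim_safe() checks in
xe_pm.c: the device is neither d3cold capable nor running with SRIOV, which
ties in with the commit message's note about SRIOV runtime resume / suspend
on LNL.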
-rw-r--r-- | drivers/gpu/drm/xe/xe_bo.c | 11
-rw-r--r-- | drivers/gpu/drm/xe/xe_pm.c | 9
-rw-r--r-- | drivers/gpu/drm/xe/xe_pm.h | 1
3 files changed, 19 insertions, 2 deletions
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 1c18ba9bd099..0c64d3b3155e 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -758,7 +758,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 
 	xe_assert(xe, migrate);
 	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
-	xe_pm_runtime_get_noresume(xe);
+	if (xe_rpm_reclaim_safe(xe)) {
+		/*
+		 * We might be called through swapout in the validation path of
+		 * another TTM device, so unconditionally acquire rpm here.
+		 */
+		xe_pm_runtime_get(xe);
+	} else {
+		drm_WARN_ON(&xe->drm, handle_system_ccs);
+		xe_pm_runtime_get_noresume(xe);
+	}
 
 	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
 		/*
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 2600c936527e..e518557e0eec 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -79,7 +79,14 @@ static struct lockdep_map xe_pm_runtime_nod3cold_map = {
 };
 #endif
 
-static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
+/**
+ * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
+ * @xe: The xe device.
+ *
+ * Return: true if it is safe to runtime resume from reclaim context.
+ * false otherwise.
+ */
+bool xe_rpm_reclaim_safe(const struct xe_device *xe)
 {
 	return !xe->d3cold.capable && !xe->info.has_sriov;
 }
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 9aef673b1c8a..998d1ed64556 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -31,6 +31,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
 void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+bool xe_rpm_reclaim_safe(const struct xe_device *xe);
 
 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
 int xe_pm_module_init(void);