author     Christian König <christian.koenig@amd.com>   2025-08-27 12:45:45 +0300
committer  Alex Deucher <alexander.deucher@amd.com>     2025-09-18 23:59:14 +0300
commit     59e4405e9ee2b318342d252422a82dd863b89ef4 (patch)
tree       674721daa682adb6c53df107cb326d120cc46a46 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
parent     0aa09d8a6cab0f7d284e61eff28ac3be4d324c20 (diff)
download   linux-59e4405e9ee2b318342d252422a82dd863b89ef4.tar.xz
drm/amdgpu: revert to old status lock handling v3
It turned out that protecting the status of each bo_va with a spinlock was just hiding problems instead of solving them.

Revert the whole approach, add a separate stats_lock and lockdep assertions that the correct reservation lock is held all over the place.

This not only allows for better checks that a state transition is properly protected by a lock, but also makes it possible to switch back to using plain list macros to iterate over the state lists protected by the dma_resv lock of the root PD.

v2: re-add missing check
v3: split into two patches

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
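For context, a minimal sketch (not code from this patch) of the locking pattern the message describes: instead of taking a per-VM status spinlock around list operations, a lockdep assertion checks that the reservation lock of the root page directory is held before a state list is touched. The helper name example_vm_status_move() is hypothetical; dma_resv_assert_held() and the vm->root.bo->tbo.base.resv chain follow the in-tree amdgpu layout, but the actual series may use a dedicated assertion helper.

	/*
	 * Hypothetical illustration only, not part of this commit.
	 * Assert the root PD's dma_resv lock instead of taking a
	 * per-VM status spinlock around the list update.
	 */
	#include <linux/dma-resv.h>
	#include "amdgpu.h"
	#include "amdgpu_vm.h"

	static void example_vm_status_move(struct amdgpu_vm *vm,
					   struct amdgpu_vm_bo_base *entry,
					   struct list_head *target)
	{
		/* lockdep complains if the caller does not hold the root PD resv */
		dma_resv_assert_held(vm->root.bo->tbo.base.resv);

		/* with the lock asserted, a plain list macro is sufficient */
		list_move(&entry->vm_status, target);
	}

Because every caller already reserves the root PD for page-table updates, the assertion documents and enforces the locking rule without adding a second lock to the hot path.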
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 4 ----
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 30022123b0bf..f57c48b74274 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -541,9 +541,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
 	entry->bo->vm_bo = NULL;
 	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
 
-	spin_lock(&entry->vm->status_lock);
 	list_del(&entry->vm_status);
-	spin_unlock(&entry->vm->status_lock);
 	amdgpu_bo_unref(&entry->bo);
 }
 
@@ -587,7 +585,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
 	struct amdgpu_vm_pt_cursor seek;
 	struct amdgpu_vm_bo_base *entry;
 
-	spin_lock(&params->vm->status_lock);
 	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
 		if (entry && entry->bo)
 			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
@@ -595,7 +592,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
 
 	/* enter start node now */
 	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
-	spin_unlock(&params->vm->status_lock);
 }
 
 /**