summaryrefslogtreecommitdiff
path: root/drivers/gpu
diff options
context:
space:
mode:
authorSunil Khatri <sunil.khatri@amd.com>2026-04-08 14:46:24 +0300
committerAlex Deucher <alexander.deucher@amd.com>2026-04-17 22:41:12 +0300
commit51358444d18d8b9905d7eb1a30686aa5610b2b5f (patch)
treeb361150c74cfbdf6523d096fdcc1fc408b5d3de3 /drivers/gpu
parent1eb90c7403c4afae1d791a2671f4873fd8d44c34 (diff)
downloadlinux-51358444d18d8b9905d7eb1a30686aa5610b2b5f.tar.xz
drm/amdgpu/userq: don't lock root bo with userq_mutex held
Do not hold the reservation lock for the root bo if userq_mutex is already held in the call flow; this causes a lock issue with ttm_bo_delayed_delete. It is better to lock vm->root.bo first and then take userq_mutex, so that userq_mutex threads don't get stuck until the reservation lock is held. In this case it helps in the function amdgpu_userq_buffer_vas_mapped for each queue during restore_all. Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c11
1 file changed, 8 insertions, 3 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 33ffbf894801..a0a45c5e6335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -270,15 +270,13 @@ static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
struct amdgpu_bo_va_mapping *mapping;
bool r;
- if (amdgpu_bo_reserve(vm->root.bo, false))
- return false;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
r = true;
else
r = false;
- amdgpu_bo_unreserve(vm->root.bo);
return r;
}
@@ -991,10 +989,16 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_usermode_queue *queue;
unsigned long queue_id;
int ret = 0, r;
+
+ if (amdgpu_bo_reserve(vm->root.bo, false))
+ return false;
+
mutex_lock(&uq_mgr->userq_mutex);
/* Resume all the queues for this process */
xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
@@ -1012,6 +1016,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
}
mutex_unlock(&uq_mgr->userq_mutex);
+ amdgpu_bo_unreserve(vm->root.bo);
if (ret)
drm_file_err(uq_mgr->file,