summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Sunil Khatri <sunil.khatri@amd.com> 2026-03-26 10:52:20 +0300
committer: Alex Deucher <alexander.deucher@amd.com> 2026-04-03 20:59:15 +0300
commit: 38476bde59948fe85e20bb1e7f3f66525d0c10cd (patch)
tree: e3dddebcde4f3ebe581a4a6fd6b2e8c9ea0d8f3f
parent: 4c86e12ab1be971ddb0748e373cf6d25d68bdc22 (diff)
download: linux-38476bde59948fe85e20bb1e7f3f66525d0c10cd.tar.xz
drm/amdgpu/userq: call dma_resv_wait_timeout without test for signalled
In function amdgpu_userq_gem_va_unmap_validate call dma_resv_wait_timeout
directly. Also since we are waiting forever we should not be having any
return value and hence no handling needed.

Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 9
3 files changed, 11 insertions, 23 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 999d8e298bce..14e590cab2b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -1462,17 +1462,16 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
return ret;
}
-int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t saddr)
+void amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_bo_va *bo_va = mapping->bo_va;
struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
- int ret = 0;
if (!ip_mask)
- return 0;
+ return;
dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
/**
@@ -1483,14 +1482,8 @@ int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
* unmap is only for one kind of userq VAs, so at this point suppose
* the eviction fence is always unsignaled.
*/
- if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
- ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
- MAX_SCHEDULE_TIMEOUT);
- if (ret <= 0)
- return -EBUSY;
- }
-
- return 0;
+ dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
}
void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index a4d44abf24fa..675fe6395ac8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -160,7 +160,7 @@ void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue);
int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *queue,
u64 addr, u64 expected_size);
-int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t saddr);
+void amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 73abac6be5b3..00a532f4e027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1978,7 +1978,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
- int r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2003,12 +2002,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* during user requests GEM unmap IOCTL except for forcing the unmap
* from user space.
*/
- if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
- r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
- if (unlikely(r == -EBUSY))
- dev_warn_once(adev->dev,
- "Attempt to unmap an active userq buffer\n");
- }
+ if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0))
+ amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);