From 8c45b31909b730f9c7b146588e038f9c6553394d Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 29 Jun 2023 18:01:21 -0400 Subject: drm/amdkfd: Skip handle mapping SVM range with no GPU access If the SVM range has neither the GPU access nor the access-in-place attribute, validate and map to GPU should skip the range. Add a NULL pointer check, because find_first_bit(ctx->bitmap, MAX_GPU_INSTANCE) returns MAX_GPU_INSTANCE as gpuidx if ctx->bitmap is empty. Signed-off-by: Philip Yang Reviewed-by: Alex Sierra Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 5ff1a5a89d96..479c4f66afa7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1522,6 +1522,8 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) struct kfd_process_device *pdd; pdd = kfd_process_device_from_gpuidx(p, gpuidx); + if (!pdd) + return NULL; return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev); } @@ -1596,12 +1598,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { - if (!prange->mapped_to_gpu) { + bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); + if (!prange->mapped_to_gpu || + bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { r = 0; goto free_ctx; } - - bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); } if (prange->actual_loc && !prange->ttm_res) { -- cgit v1.2.3 From 8abc1eb2987aee449e62c72a498374a43b409261 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 19 Apr 2022 12:21:45 +0200 Subject: drm/amdkfd: switch over to using drm_exec v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoids quite a bit of logic and kmalloc overhead. v2: fix multiple problems pointed out by Felix. v3: fix two more nitpicks from Felix. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20230711133122.3710-4-christian.koenig@amd.com --- drivers/gpu/drm/amd/amdgpu/Kconfig | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 299 +++++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 18 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 + drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 49 ++-- 6 files changed, 164 insertions(+), 212 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index b91e79c721e2..22d88f8ef527 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -21,6 +21,7 @@ config DRM_AMDGPU select INTERVAL_TREE select DRM_BUDDY select DRM_SUBALLOC_HELPER + select DRM_EXEC # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work # ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2d0406bff84e..1e4cc1fe88fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -25,6 +25,7 @@ #ifndef AMDGPU_AMDKFD_H_INCLUDED #define AMDGPU_AMDKFD_H_INCLUDED +#include #include #include #include @@ -32,7 +33,6 @@ #include #include #include -#include #include "amdgpu_sync.h" #include "amdgpu_vm.h" #include "amdgpu_xcp.h" @@ -71,8 +71,7 @@ struct kgd_mem { struct hmm_range *range; struct list_head attachments; /* protected by amdkfd_process_info.lock */ - struct ttm_validate_buffer validate_list; - struct ttm_validate_buffer resv_list; + struct list_head validate_list; uint32_t domain; unsigned int mapped_to_gpu_memory; uint64_t va; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index f61527b800e6..dedfee14acce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -27,6 +27,8 @@ #include #include +#include + #include "amdgpu_object.h" #include "amdgpu_gem.h" #include "amdgpu_vm.h" @@ -964,28 +966,20 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, struct amdkfd_process_info *process_info, bool userptr) { - struct ttm_validate_buffer *entry = &mem->validate_list; - struct amdgpu_bo *bo = mem->bo; - - INIT_LIST_HEAD(&entry->head); - entry->num_shared = 1; - entry->bo = &bo->tbo; mutex_lock(&process_info->lock); if (userptr) - list_add_tail(&entry->head, &process_info->userptr_valid_list); + list_add_tail(&mem->validate_list, + &process_info->userptr_valid_list); else - list_add_tail(&entry->head, &process_info->kfd_bo_list); + list_add_tail(&mem->validate_list, &process_info->kfd_bo_list); mutex_unlock(&process_info->lock); } static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, struct amdkfd_process_info *process_info) { - struct ttm_validate_buffer *bo_list_entry; - - bo_list_entry = &mem->validate_list; mutex_lock(&process_info->lock); - list_del(&bo_list_entry->head); + list_del(&mem->validate_list); mutex_unlock(&process_info->lock); } @@ -1072,13 +1066,12 @@ out: * object can track VM updates. 
*/ struct bo_vm_reservation_context { - struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */ - unsigned int n_vms; /* Number of VMs reserved */ - struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */ - struct ww_acquire_ctx ticket; /* Reservation ticket */ - struct list_head list, duplicates; /* BO lists */ - struct amdgpu_sync *sync; /* Pointer to sync object */ - bool reserved; /* Whether BOs are reserved */ + /* DRM execution context for the reservation */ + struct drm_exec exec; + /* Number of VMs reserved */ + unsigned int n_vms; + /* Pointer to sync object */ + struct amdgpu_sync *sync; }; enum bo_vm_match { @@ -1102,35 +1095,26 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, WARN_ON(!vm); - ctx->reserved = false; ctx->n_vms = 1; ctx->sync = &mem->sync; - - INIT_LIST_HEAD(&ctx->list); - INIT_LIST_HEAD(&ctx->duplicates); - - ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL); - if (!ctx->vm_pd) - return -ENOMEM; - - ctx->kfd_bo.priority = 0; - ctx->kfd_bo.tv.bo = &bo->tbo; - ctx->kfd_bo.tv.num_shared = 1; - list_add(&ctx->kfd_bo.tv.head, &ctx->list); - - amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); - - ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates); - if (ret) { - pr_err("Failed to reserve buffers in ttm.\n"); - kfree(ctx->vm_pd); - ctx->vm_pd = NULL; - return ret; + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_until_all_locked(&ctx->exec) { + ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; + + ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; } - - ctx->reserved = true; return 0; + +error: + pr_err("Failed to reserve buffers in ttm.\n"); + drm_exec_fini(&ctx->exec); + return ret; } /** @@ -1147,63 +1131,39 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, struct amdgpu_vm *vm, enum bo_vm_match map_type, struct bo_vm_reservation_context *ctx) { - struct amdgpu_bo *bo = mem->bo; struct kfd_mem_attachment *entry; - unsigned int i; + struct amdgpu_bo *bo = mem->bo; int ret; - ctx->reserved = false; - ctx->n_vms = 0; - ctx->vm_pd = NULL; ctx->sync = &mem->sync; + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_until_all_locked(&ctx->exec) { + ctx->n_vms = 0; + list_for_each_entry(entry, &mem->attachments, list) { + if ((vm && vm != entry->bo_va->base.vm) || + (entry->is_mapped != map_type + && map_type != BO_VM_ALL)) + continue; - INIT_LIST_HEAD(&ctx->list); - INIT_LIST_HEAD(&ctx->duplicates); - - list_for_each_entry(entry, &mem->attachments, list) { - if ((vm && vm != entry->bo_va->base.vm) || - (entry->is_mapped != map_type - && map_type != BO_VM_ALL)) - continue; - - ctx->n_vms++; - } - - if (ctx->n_vms != 0) { - ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), - GFP_KERNEL); - if (!ctx->vm_pd) - return -ENOMEM; - } - - ctx->kfd_bo.priority = 0; - ctx->kfd_bo.tv.bo = &bo->tbo; - ctx->kfd_bo.tv.num_shared = 1; - list_add(&ctx->kfd_bo.tv.head, &ctx->list); - - i = 0; - list_for_each_entry(entry, &mem->attachments, list) { - if ((vm && vm != entry->bo_va->base.vm) || - (entry->is_mapped != map_type - && map_type != BO_VM_ALL)) - continue; - - amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list, - &ctx->vm_pd[i]); - i++; - } + ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm, + &ctx->exec, 2); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; + ++ctx->n_vms; + 
} - ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates); - if (ret) { - pr_err("Failed to reserve buffers in ttm.\n"); - kfree(ctx->vm_pd); - ctx->vm_pd = NULL; - return ret; + ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; } - - ctx->reserved = true; return 0; + +error: + pr_err("Failed to reserve buffers in ttm.\n"); + drm_exec_fini(&ctx->exec); + return ret; } /** @@ -1224,15 +1184,8 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, if (wait) ret = amdgpu_sync_wait(ctx->sync, intr); - if (ctx->reserved) - ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list); - kfree(ctx->vm_pd); - + drm_exec_fini(&ctx->exec); ctx->sync = NULL; - - ctx->reserved = false; - ctx->vm_pd = NULL; - return ret; } @@ -1854,7 +1807,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( bool use_release_notifier = (mem->bo->kfd_bo == mem); struct kfd_mem_attachment *entry, *tmp; struct bo_vm_reservation_context ctx; - struct ttm_validate_buffer *bo_list_entry; unsigned int mapped_to_gpu_memory; int ret; bool is_imported = false; @@ -1882,9 +1834,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } /* Make sure restore workers don't access the BO any more */ - bo_list_entry = &mem->validate_list; mutex_lock(&process_info->lock); - list_del(&bo_list_entry->head); + list_del(&mem->validate_list); mutex_unlock(&process_info->lock); /* Cleanup user pages and MMU notifiers */ @@ -2451,14 +2402,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, /* Move all invalidated BOs to the userptr_inval_list */ list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_valid_list, - validate_list.head) + validate_list) if (mem->invalid) - list_move_tail(&mem->validate_list.head, + list_move_tail(&mem->validate_list, &process_info->userptr_inval_list); /* Go through userptr_inval_list and update any invalid user_pages */ list_for_each_entry(mem, &process_info->userptr_inval_list, - validate_list.head) { + validate_list) { invalid = mem->invalid; if (!invalid) /* BO hasn't been invalidated since the last @@ -2538,50 +2489,41 @@ unlock_out: */ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) { - struct amdgpu_bo_list_entry *pd_bo_list_entries; - struct list_head resv_list, duplicates; - struct ww_acquire_ctx ticket; + struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_sync sync; + struct drm_exec exec; struct amdgpu_vm *peer_vm; struct kgd_mem *mem, *tmp_mem; struct amdgpu_bo *bo; - struct ttm_operation_ctx ctx = { false, false }; - int i, ret; - - pd_bo_list_entries = kcalloc(process_info->n_vms, - sizeof(struct amdgpu_bo_list_entry), - GFP_KERNEL); - if (!pd_bo_list_entries) { - pr_err("%s: Failed to allocate PD BO list entries\n", __func__); - ret = -ENOMEM; - goto out_no_mem; - } - - INIT_LIST_HEAD(&resv_list); - INIT_LIST_HEAD(&duplicates); + int ret; - /* Get all the page directory BOs that need to be reserved */ - i = 0; - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) - amdgpu_vm_get_pd_bo(peer_vm, &resv_list, - &pd_bo_list_entries[i++]); - /* Add the userptr_inval_list entries to resv_list */ - list_for_each_entry(mem, &process_info->userptr_inval_list, - validate_list.head) { - list_add_tail(&mem->resv_list.head, &resv_list); - mem->resv_list.bo = mem->validate_list.bo; - mem->resv_list.num_shared = mem->validate_list.num_shared; - } + amdgpu_sync_create(&sync); + drm_exec_init(&exec, 
0); /* Reserve all BOs and page tables for validation */ - ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); - WARN(!list_empty(&duplicates), "Duplicates should be empty"); - if (ret) - goto out_free; + drm_exec_until_all_locked(&exec) { + /* Reserve all the page directories */ + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) { + ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unreserve_out; + } - amdgpu_sync_create(&sync); + /* Reserve the userptr_inval_list entries to resv_list */ + list_for_each_entry(mem, &process_info->userptr_inval_list, + validate_list) { + struct drm_gem_object *gobj; + + gobj = &mem->bo->tbo.base; + ret = drm_exec_prepare_obj(&exec, gobj, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unreserve_out; + } + } ret = process_validate_vms(process_info); if (ret) @@ -2590,7 +2532,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) /* Validate BOs and update GPUVM page tables */ list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, - validate_list.head) { + validate_list) { struct kfd_mem_attachment *attachment; bo = mem->bo; @@ -2632,12 +2574,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) ret = process_update_pds(process_info, &sync); unreserve_out: - ttm_eu_backoff_reservation(&ticket, &resv_list); + drm_exec_fini(&exec); amdgpu_sync_wait(&sync, false); amdgpu_sync_free(&sync); -out_free: - kfree(pd_bo_list_entries); -out_no_mem: return ret; } @@ -2653,7 +2592,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, - validate_list.head) { + validate_list) { bool valid; /* keep mem without hmm range at userptr_inval_list */ @@ -2677,7 +2616,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i continue; } - list_move_tail(&mem->validate_list.head, + list_move_tail(&mem->validate_list, &process_info->userptr_valid_list); } @@ -2787,50 +2726,44 @@ unlock_out: */ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) { - struct amdgpu_bo_list_entry *pd_bo_list; struct amdkfd_process_info *process_info = info; struct amdgpu_vm *peer_vm; struct kgd_mem *mem; - struct bo_vm_reservation_context ctx; struct amdgpu_amdkfd_fence *new_fence; - int ret = 0, i; struct list_head duplicate_save; struct amdgpu_sync sync_obj; unsigned long failed_size = 0; unsigned long total_size = 0; + struct drm_exec exec; + int ret; INIT_LIST_HEAD(&duplicate_save); - INIT_LIST_HEAD(&ctx.list); - INIT_LIST_HEAD(&ctx.duplicates); - - pd_bo_list = kcalloc(process_info->n_vms, - sizeof(struct amdgpu_bo_list_entry), - GFP_KERNEL); - if (!pd_bo_list) - return -ENOMEM; - i = 0; mutex_lock(&process_info->lock); - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) - amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]); - /* Reserve all BOs and page tables/directory. 
Add all BOs from - * kfd_bo_list to ctx.list - */ - list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) { - - list_add_tail(&mem->resv_list.head, &ctx.list); - mem->resv_list.bo = mem->validate_list.bo; - mem->resv_list.num_shared = mem->validate_list.num_shared; - } + drm_exec_init(&exec, 0); + drm_exec_until_all_locked(&exec) { + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) { + ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto ttm_reserve_fail; + } - ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list, - false, &duplicate_save); - if (ret) { - pr_debug("Memory eviction: TTM Reserve Failed. Try again\n"); - goto ttm_reserve_fail; + /* Reserve all BOs and page tables/directory. Add all BOs from + * kfd_bo_list to ctx.list + */ + list_for_each_entry(mem, &process_info->kfd_bo_list, + validate_list) { + struct drm_gem_object *gobj; + + gobj = &mem->bo->tbo.base; + ret = drm_exec_prepare_obj(&exec, gobj, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto ttm_reserve_fail; + } } amdgpu_sync_create(&sync_obj); @@ -2848,7 +2781,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) /* Validate BOs and map them to GPUVM (update VM page tables). */ list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) { + validate_list) { struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; @@ -2921,8 +2854,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) *ef = dma_fence_get(&new_fence->base); /* Attach new eviction fence to all BOs except pinned ones */ - list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) { + list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { if (mem->bo->tbo.pin_count) continue; @@ -2941,11 +2873,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) } validate_map_fail: - ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list); amdgpu_sync_free(&sync_obj); ttm_reserve_fail: + drm_exec_fini(&exec); mutex_unlock(&process_info->lock); - kfree(pd_bo_list); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 143d11afe0e5..c5c5f2eb76c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" @@ -360,6 +361,23 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, list_add(&entry->tv.head, validated); } +/** + * amdgpu_vm_lock_pd - lock PD in drm_exec + * + * @vm: vm providing the BOs + * @exec: drm execution context + * @num_fences: number of extra fences to reserve + * + * Lock the VM root PD in the DRM execution context. 
+ */ +int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences) +{ + /* We need at least two fences for the VM PD/PT updates */ + return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, + 2 + num_fences); +} + /** * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9c85d494f2a2..3e29886e5535 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -36,6 +36,8 @@ #include "amdgpu_ring.h" #include "amdgpu_ids.h" +struct drm_exec; + struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; @@ -399,6 +401,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); +int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences); bool amdgpu_vm_ready(struct amdgpu_vm *vm); uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 5ff1a5a89d96..aa32b06eea77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -24,6 +24,8 @@ #include #include #include +#include + #include "amdgpu_sync.h" #include "amdgpu_object.h" #include "amdgpu_vm.h" @@ -1455,37 +1457,34 @@ struct svm_validate_context { struct svm_range *prange; bool intr; DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); - struct ttm_validate_buffer tv[MAX_GPU_INSTANCE]; - struct list_head validate_list; - struct ww_acquire_ctx ticket; + struct drm_exec exec; }; -static int svm_range_reserve_bos(struct svm_validate_context *ctx) +static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr) { struct kfd_process_device *pdd; struct amdgpu_vm *vm; uint32_t gpuidx; int r; - INIT_LIST_HEAD(&ctx->validate_list); - for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { - pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); - if (!pdd) { - pr_debug("failed to find device idx %d\n", gpuidx); - return -EINVAL; - } - vm = drm_priv_to_vm(pdd->drm_priv); - - ctx->tv[gpuidx].bo = &vm->root.bo->tbo; - ctx->tv[gpuidx].num_shared = 4; - list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); - } + drm_exec_init(&ctx->exec, intr ? 
DRM_EXEC_INTERRUPTIBLE_WAIT: 0); + drm_exec_until_all_locked(&ctx->exec) { + for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { + pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + r = -EINVAL; + goto unreserve_out; + } + vm = drm_priv_to_vm(pdd->drm_priv); - r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list, - ctx->intr, NULL); - if (r) { - pr_debug("failed %d to reserve bo\n", r); - return r; + r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(r)) { + pr_debug("failed %d to reserve bo\n", r); + goto unreserve_out; + } + } } for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { @@ -1508,13 +1507,13 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) return 0; unreserve_out: - ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); + drm_exec_fini(&ctx->exec); return r; } static void svm_range_unreserve_bos(struct svm_validate_context *ctx) { - ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); + drm_exec_fini(&ctx->exec); } static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) @@ -1613,7 +1612,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, goto free_ctx; } - svm_range_reserve_bos(ctx); + svm_range_reserve_bos(ctx, intr); p = container_of(prange->svms, struct kfd_process, svms); owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap, -- cgit v1.2.3 From 8923137dbe4b24863694c35284df51d036b5dd5e Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Wed, 19 Jul 2023 16:20:58 -0500 Subject: drm/amdkfd: avoid svm dump when dynamic debug disabled Add a dynamic_svm_range_dump macro to avoid iterating over SVM lists in svm_range_debug_dump when dynamic debug is disabled. Otherwise, it could hurt performance, especially with a large number of SVM ranges. Make sure both the svm_range_set_attr and svm_range_debug_dump functions are dynamically enabled to print svm_range_debug_dump debug traces. Signed-off-by: Alex Sierra Tested-by: Alex Sierra Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 479c4f66afa7..1b50eae051a4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -46,6 +46,8 @@ * page table is updated. */ #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC) +#define dynamic_svm_range_dump(svms) \ + _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms) /* Giant svm range split into smaller ranges based on this, it is decided using * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to @@ -3563,7 +3565,7 @@ out_unlock_range: break; } - svm_range_debug_dump(svms); + dynamic_svm_range_dump(svms); mutex_unlock(&svms->lock); mmap_read_unlock(mm); -- cgit v1.2.3 From ab3400eb94597ef009c1395c7c789736d070613b Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Thu, 27 Jul 2023 15:32:37 -0500 Subject: drm/amdkfd: avoid unmap dma address when svm_ranges are split DMA address references within svm_ranges should be unmapped only after the memory has been released from the system. When a range is split, the DMA address information should be copied to the resulting ranges, leaving the DMA mapping itself intact.
Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 7 ++-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 61 +++++++++++++++++++++++--------- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2 +- 3 files changed, 50 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 709ac885ca6d..7d82c7da223a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -461,7 +461,6 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, 0, node->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); - svm_range_free_dma_mappings(prange); out_free: kvfree(buf); @@ -543,10 +542,12 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, addr = next; } - if (cpages) + if (cpages) { prange->actual_loc = best_loc; - else + svm_range_free_dma_mappings(prange, true); + } else { svm_range_vram_node_free(prange); + } return r < 0 ? r : 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 01c7de2d6e19..243ef74708e3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -243,7 +243,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, } } -void svm_range_free_dma_mappings(struct svm_range *prange) +void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma) { struct kfd_process_device *pdd; dma_addr_t *dma_addr; @@ -264,13 +264,14 @@ void svm_range_free_dma_mappings(struct svm_range *prange) continue; } dev = &pdd->dev->adev->pdev->dev; - svm_range_dma_unmap(dev, dma_addr, 0, prange->npages); + if (unmap_dma) + svm_range_dma_unmap(dev, dma_addr, 0, prange->npages); kvfree(dma_addr); prange->dma_addr[gpuidx] = NULL; } } -static void svm_range_free(struct svm_range *prange, bool update_mem_usage) +static void svm_range_free(struct svm_range *prange, bool do_unmap) { uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT; struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); @@ -279,9 +280,9 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage) prange->start, prange->last); svm_range_vram_node_free(prange); - svm_range_free_dma_mappings(prange); + svm_range_free_dma_mappings(prange, do_unmap); - if (update_mem_usage && !p->xnack_enabled) { + if (do_unmap && !p->xnack_enabled) { pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size); amdgpu_amdkfd_unreserve_mem_limit(NULL, size, KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); @@ -853,6 +854,37 @@ static void svm_range_debug_dump(struct svm_range_list *svms) } } +static void * +svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements, + uint64_t offset) +{ + unsigned char *dst; + + dst = kvmalloc_array(num_elements, size, GFP_KERNEL); + if (!dst) + return NULL; + memcpy(dst, (unsigned char *)psrc + offset, num_elements * size); + + return (void *)dst; +} + +static int +svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src) +{ + int i; + + for (i = 0; i < MAX_GPU_INSTANCE; i++) { + if (!src->dma_addr[i]) + continue; + dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i], + sizeof(*src->dma_addr[i]), src->npages, 0); + if (!dst->dma_addr[i]) + return -ENOMEM; + } + + return 0; +} + static int svm_range_split_array(void *ppnew, void *ppold, size_t size, uint64_t old_start, uint64_t old_n, @@ 
-867,22 +899,16 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size, if (!pold) return 0; - new = kvmalloc_array(new_n, size, GFP_KERNEL); + d = (new_start - old_start) * size; + new = svm_range_copy_array(pold, size, new_n, d); if (!new) return -ENOMEM; - - d = (new_start - old_start) * size; - memcpy(new, pold + d, new_n * size); - - old = kvmalloc_array(old_n, size, GFP_KERNEL); + d = (new_start == old_start) ? new_n * size : 0; + old = svm_range_copy_array(pold, size, old_n, d); if (!old) { kvfree(new); return -ENOMEM; } - - d = (new_start == old_start) ? new_n * size : 0; - memcpy(old, pold + d, old_n * size); - kvfree(pold); *(void **)ppold = old; *(void **)ppnew = new; @@ -1928,7 +1954,10 @@ static struct svm_range *svm_range_clone(struct svm_range *old) new = svm_range_new(old->svms, old->start, old->last, false); if (!new) return NULL; - + if (svm_range_copy_dma_addrs(new, old)) { + svm_range_free(new, false); + return NULL; + } if (old->svm_bo) { new->ttm_res = old->ttm_res; new->offset = old->offset; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 21b14510882b..9e668eeefb32 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -183,7 +183,7 @@ void svm_range_add_list_work(struct svm_range_list *svms, void schedule_deferred_list_work(struct svm_range_list *svms); void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, unsigned long offset, unsigned long npages); -void svm_range_free_dma_mappings(struct svm_range *prange); +void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma); int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, uint64_t *svm_priv_data_size); int kfd_criu_checkpoint_svm(struct kfd_process *p, -- cgit v1.2.3 From 475968fe4a05b060864524a403e0d4a53f79aed0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 4 Aug 2023 15:29:17 +0200 Subject: drm/amdkfd: fix build failure without CONFIG_DYNAMIC_DEBUG When CONFIG_DYNAMIC_DEBUG is disabled altogether, calling _dynamic_func_call_no_desc() does not work: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c: In function 'svm_range_set_attr': drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:52:9: error: implicit declaration of function '_dynamic_func_call_no_desc' [-Werror=implicit-function-declaration] 52 | _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms) | ^~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:3564:9: note: in expansion of macro 'dynamic_svm_range_dump' 3564 | dynamic_svm_range_dump(svms); | ^~~~~~~~~~~~~~~~~~~~~~ Add a compile-time conditional in addition to the runtime check. Fixes: 8923137dbe4b ("drm/amdkfd: avoid svm dump when dynamic debug disabled") Signed-off-by: Arnd Bergmann Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 243ef74708e3..dbbe6559d6bc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -23,6 +23,7 @@ #include #include +#include #include #include @@ -48,8 +49,13 @@ * page table is updated. 
*/ #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC) +#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) #define dynamic_svm_range_dump(svms) \ _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms) +#else +#define dynamic_svm_range_dump(svms) \ + do { if (0) svm_range_debug_dump(svms); } while (0) +#endif /* Giant svm range split into smaller ranges based on this, it is decided using * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to -- cgit v1.2.3
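For reference, the drm_exec conversions in the series above all reduce to the same locking shape. The sketch below is a minimal illustration of that pattern, assuming the two-argument drm_exec_init() these patches use; lock_bo_and_pd() is a hypothetical example assembled from the calls shown in the diffs, not code from the series, and includes other than drm_exec.h are elided.

#include <drm/drm_exec.h>

/* Hypothetical sketch of the drm_exec pattern adopted above: lock one
 * VM root page directory plus one BO, restarting from scratch whenever
 * a ww-mutex conflict is hit. Not code from the series.
 */
static int lock_bo_and_pd(struct amdgpu_vm *vm, struct drm_gem_object *obj,
			  struct drm_exec *exec)
{
	int r;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(exec) {
		/* Lock the root PD; amdgpu_vm_lock_pd() reserves two extra
		 * fence slots for the VM PD/PT updates on top of the two
		 * requested here.
		 */
		r = amdgpu_vm_lock_pd(vm, exec, 2);
		/* On contention, drop all locks and rerun the loop body */
		drm_exec_retry_on_contention(exec);
		if (unlikely(r))
			goto error;

		/* Lock the BO itself and reserve one fence slot */
		r = drm_exec_prepare_obj(exec, obj, 1);
		drm_exec_retry_on_contention(exec);
		if (unlikely(r))
			goto error;
	}
	return 0;	/* caller releases everything with drm_exec_fini() */

error:
	drm_exec_fini(exec);
	return r;
}

Compared with the ttm_eu_reserve_buffers() flow being removed, there is no validate/resv list to build and no kcalloc'd amdgpu_bo_list_entry array, and there is no separate backoff path: drm_exec_fini() performs all of the cleanup on both the error and the unreserve paths, which is why the patches can drop the reserved flag and the vm_pd bookkeeping entirely.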