Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   72
1 files changed, 10 insertions, 62 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f2ce5a0defd9..28f5e8b21a99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -375,7 +375,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 		return;
 
-	vm->bulk_moveable = false;
+	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
 		amdgpu_vm_bo_relocated(base);
 	else
@@ -638,36 +638,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
- *
- * @bo: BO which was removed from the LRU
- *
- * Make sure the bulk_moveable flag is updated when a BO is removed from the
- * LRU.
- */
-void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
-{
-	struct amdgpu_bo *abo;
-	struct amdgpu_vm_bo_base *bo_base;
-
-	if (!amdgpu_bo_is_amdgpu_bo(bo))
-		return;
-
-	if (bo->pin_count)
-		return;
-
-	abo = ttm_to_amdgpu_bo(bo);
-	if (!abo->parent)
-		return;
-	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
-		struct amdgpu_vm *vm = bo_base->vm;
-
-		if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
-			vm->bulk_moveable = false;
-	}
-
-}
-/**
  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
  *
  * @adev: amdgpu device pointer
@@ -679,33 +649,9 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm)
 {
-	struct amdgpu_vm_bo_base *bo_base;
-
-	if (vm->bulk_moveable) {
-		spin_lock(&adev->mman.bdev.lru_lock);
-		ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
-		spin_unlock(&adev->mman.bdev.lru_lock);
-		return;
-	}
-
-	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
-
 	spin_lock(&adev->mman.bdev.lru_lock);
-	list_for_each_entry(bo_base, &vm->idle, vm_status) {
-		struct amdgpu_bo *bo = bo_base->bo;
-		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
-
-		if (!bo->parent)
-			continue;
-
-		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
-		if (shadow)
-			ttm_bo_move_to_lru_tail(&shadow->tbo,
-						&vm->lru_bulk_move);
-	}
+	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
 	spin_unlock(&adev->mman.bdev.lru_lock);
-
-	vm->bulk_moveable = true;
 }
 
 /**
@@ -728,8 +674,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct amdgpu_vm_bo_base *bo_base, *tmp;
 	int r;
 
-	vm->bulk_moveable &= list_empty(&vm->evicted);
-
 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
 		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
@@ -1047,10 +991,16 @@ static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
 
 	if (!entry->bo)
 		return;
+
 	shadow = amdgpu_bo_shadowed(entry->bo);
+	if (shadow) {
+		ttm_bo_set_bulk_move(&shadow->tbo, NULL);
+		amdgpu_bo_unref(&shadow);
+	}
+
+	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
 	entry->bo->vm_bo = NULL;
 	list_del(&entry->vm_status);
-	amdgpu_bo_unref(&shadow);
 	amdgpu_bo_unref(&entry->bo);
 }
 
@@ -1070,8 +1020,6 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
 	struct amdgpu_vm_pt_cursor cursor;
 	struct amdgpu_vm_bo_base *entry;
 
-	vm->bulk_moveable = false;
-
 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
 		amdgpu_vm_free_table(entry);
 
@@ -2651,7 +2599,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	if (bo) {
 		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
-			vm->bulk_moveable = false;
+			ttm_bo_set_bulk_move(&bo->tbo, NULL);
 
 		for (base = &bo_va->base.bo->vm_bo; *base;
 		     base = &(*base)->next) {
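Net effect of the diff: instead of amdgpu tracking a bulk_moveable flag and rebuilding the bulk move set by walking vm->idle, TTM now tracks each BO's bulk-move membership via ttm_bo_set_bulk_move(), and amdgpu_vm_move_to_lru_tail() only bumps the whole set to the LRU tail under the lru_lock. A minimal sketch of that usage pattern follows; it is illustrative only and not code from this commit. The example_vm type and example_vm_* helpers are hypothetical stand-ins for the amdgpu structures, and TTM header locations vary between kernel versions.

/* Illustrative sketch only -- not from the commit. It mirrors the TTM calls
 * visible in the diff: ttm_bo_set_bulk_move() adds/removes a BO from a bulk
 * move set, and ttm_lru_bulk_move_tail() bumps the whole set at once.
 */
#include <linux/spinlock.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical per-VM state, standing in for struct amdgpu_vm. */
struct example_vm {
	struct ttm_lru_bulk_move lru_bulk_move;
};

/* One-time setup of the bulk move tracker, e.g. at VM creation. */
static void example_vm_init(struct example_vm *vm)
{
	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
}

/* Register a BO with the VM's bulk move set (the role the patch gives to
 * amdgpu_vm_bo_base_init()). Callers are expected to hold the BO's
 * reservation, as in the amdgpu paths above.
 */
static void example_vm_track_bo(struct example_vm *vm,
				struct ttm_buffer_object *bo)
{
	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
}

/* Drop a BO from the set again before freeing it (as the patched
 * amdgpu_vm_free_table() and amdgpu_vm_bo_rmv() do).
 */
static void example_vm_untrack_bo(struct ttm_buffer_object *bo)
{
	ttm_bo_set_bulk_move(bo, NULL);
}

/* Move every tracked BO to the LRU tail in one go, mirroring the new
 * amdgpu_vm_move_to_lru_tail().
 */
static void example_vm_move_to_lru_tail(struct ttm_device *bdev,
					struct example_vm *vm)
{
	spin_lock(&bdev->lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&bdev->lru_lock);
}

The design point the diff encodes is that LRU bookkeeping moves from the driver into TTM: once BOs are registered, the driver no longer needs to invalidate and rebuild the set, which is why the bulk_moveable flag and amdgpu_vm_del_from_lru_notify() can be deleted outright.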