Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_vm.c | 791
1 file changed, 318 insertions(+), 473 deletions(-)
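The core of the change shown below replaces per-VMA bind/unbind submission with a single job per tile, built from page-table update ops that are counted while the bind ops are parsed and allocated up front (xe_vma_ops_incr_pt_update_ops() / xe_vma_ops_alloc()). A minimal sketch of that count-then-allocate pattern, using simplified stand-in types rather than the driver's real structures:

```c
/*
 * Simplified model of the two-pass pattern introduced by this patch:
 * pass 1 counts page-table update ops per tile, pass 2 allocates
 * exactly that many entries before anything is executed. Names and
 * types here are illustrative stand-ins, not the driver's definitions.
 */
#include <stdlib.h>

#define MAX_TILES 4	/* stand-in for XE_MAX_TILES_PER_DEVICE */

struct pt_update_ops_sketch {
	void *ops;		/* array sized after counting */
	unsigned int num_ops;
};

struct vma_ops_sketch {
	struct pt_update_ops_sketch pt_update_ops[MAX_TILES];
};

/* Pass 1: bump the op count on every tile covered by tile_mask. */
static void incr_pt_update_ops(struct vma_ops_sketch *vops, unsigned char tile_mask)
{
	int i;

	for (i = 0; i < MAX_TILES; ++i)
		if (tile_mask & (1u << i))
			++vops->pt_update_ops[i].num_ops;
}

/* Pass 2: allocate the per-tile arrays; skip tiles with nothing to do. */
static int alloc_pt_update_ops(struct vma_ops_sketch *vops, size_t op_size)
{
	int i;

	for (i = 0; i < MAX_TILES; ++i) {
		if (!vops->pt_update_ops[i].num_ops)
			continue;
		vops->pt_update_ops[i].ops =
			calloc(vops->pt_update_ops[i].num_ops, op_size);
		if (!vops->pt_update_ops[i].ops)
			return -1;	/* the real code returns -ENOBUFS or -ENOMEM here */
	}
	return 0;
}
```

Pre-sizing the arrays this way means allocation failures surface before any GPU work is queued, which is what allows the ioctl path later in the diff to drop the old "kill the VM on mid-execution failure" fallback.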
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 5b166fa03684..7acd5fc9d032 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -12,7 +12,7 @@ #include <drm/drm_print.h> #include <drm/ttm/ttm_execbuf_util.h> #include <drm/ttm/ttm_tt.h> -#include <drm/xe_drm.h> +#include <uapi/drm/xe_drm.h> #include <linux/ascii85.h> #include <linux/delay.h> #include <linux/kthread.h> @@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm) if (q->lr.pfence) { long timeout = dma_fence_wait(q->lr.pfence, false); - if (timeout < 0) + /* Only -ETIME on fence indicates VM needs to be killed */ + if (timeout < 0 || q->lr.pfence->error == -ETIME) return -ETIME; + dma_fence_put(q->lr.pfence); q->lr.pfence = NULL; } @@ -273,6 +275,8 @@ out_up_write: * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM * @vm: The VM. * @q: The exec_queue + * + * Note that this function might be called multiple times on the same queue. */ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) { @@ -280,8 +284,10 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) return; down_write(&vm->lock); - list_del(&q->lr.link); - --vm->preempt.num_exec_queues; + if (!list_empty(&q->lr.link)) { + list_del_init(&q->lr.link); + --vm->preempt.num_exec_queues; + } if (q->lr.pfence) { dma_fence_enable_sw_signaling(q->lr.pfence); dma_fence_put(q->lr.pfence); @@ -311,7 +317,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm) #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000 -static void xe_vm_kill(struct xe_vm *vm, bool unlocked) +/** + * xe_vm_kill() - VM Kill + * @vm: The VM. + * @unlocked: Flag indicates the VM's dma-resv is not held + * + * Kill the VM by setting banned flag indicated VM is no longer available for + * use. If in preempt fence mode, also kill all exec queue attached to the VM. + */ +void xe_vm_kill(struct xe_vm *vm, bool unlocked) { struct xe_exec_queue *q; @@ -708,6 +722,42 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm) list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; } +static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) { + if (!vops->pt_update_ops[i].num_ops) + continue; + + vops->pt_update_ops[i].ops = + kmalloc_array(vops->pt_update_ops[i].num_ops, + sizeof(*vops->pt_update_ops[i].ops), + GFP_KERNEL); + if (!vops->pt_update_ops[i].ops) + return array_of_binds ? 
-ENOBUFS : -ENOMEM; + } + + return 0; +} + +static void xe_vma_ops_fini(struct xe_vma_ops *vops) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + kfree(vops->pt_update_ops[i].ops); +} + +static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask) +{ + int i; + + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + if (BIT(i) & tile_mask) + ++vops->pt_update_ops[i].num_ops; +} + static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma, u8 tile_mask) { @@ -735,6 +785,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, xe_vm_populate_rebind(op, vma, tile_mask); list_add_tail(&op->link, &vops->list); + xe_vma_ops_incr_pt_update_ops(vops, tile_mask); return 0; } @@ -751,7 +802,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) struct xe_vma *vma, *next; struct xe_vma_ops vops; struct xe_vma_op *op, *next_op; - int err; + int err, i; lockdep_assert_held(&vm->lock); if ((xe_vm_in_lr_mode(vm) && !rebind_worker) || @@ -759,6 +810,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) return 0; xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) + vops.pt_update_ops[i].wait_vm_bookkeep = true; xe_vm_assert_held(vm); list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { @@ -775,6 +828,10 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) goto free_ops; } + err = xe_vma_ops_alloc(&vops, false); + if (err) + goto free_ops; + fence = ops_execute(vm, &vops); if (IS_ERR(fence)) { err = PTR_ERR(fence); @@ -789,6 +846,7 @@ free_ops: list_del(&op->link); kfree(op); } + xe_vma_ops_fini(&vops); return err; } @@ -798,6 +856,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma struct dma_fence *fence = NULL; struct xe_vma_ops vops; struct xe_vma_op *op, *next_op; + struct xe_tile *tile; + u8 id; int err; lockdep_assert_held(&vm->lock); @@ -805,17 +865,30 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); xe_vma_ops_init(&vops, vm, NULL, NULL, 0); + for_each_tile(tile, vm->xe, id) { + vops.pt_update_ops[id].wait_vm_bookkeep = true; + vops.pt_update_ops[tile->id].q = + xe_tile_migrate_exec_queue(tile); + } err = xe_vm_ops_add_rebind(&vops, vma, tile_mask); if (err) return ERR_PTR(err); + err = xe_vma_ops_alloc(&vops, false); + if (err) { + fence = ERR_PTR(err); + goto free_ops; + } + fence = ops_execute(vm, &vops); +free_ops: list_for_each_entry_safe(op, next_op, &vops.list, link) { list_del(&op->link); kfree(op); } + xe_vma_ops_fini(&vops); return fence; } @@ -1122,7 +1195,7 @@ static const struct drm_gpuvm_ops gpuvm_ops = { .vm_free = xe_vm_free, }; -static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index) +static u64 pde_encode_pat_index(u16 pat_index) { u64 pte = 0; @@ -1135,8 +1208,7 @@ static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index) return pte; } -static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index, - u32 pt_level) +static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level) { u64 pte = 0; @@ -1177,12 +1249,11 @@ static u64 pte_encode_ps(u32 pt_level) static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset, const u16 pat_index) { - struct xe_device *xe = xe_bo_device(bo); u64 pde; pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); pde |= XE_PAGE_PRESENT | XE_PAGE_RW; - pde |= pde_encode_pat_index(xe, pat_index); + pde |= pde_encode_pat_index(pat_index); return pde; } @@ -1190,12 +1261,11 @@ static 
u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset, static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, u16 pat_index, u32 pt_level) { - struct xe_device *xe = xe_bo_device(bo); u64 pte; pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); pte |= XE_PAGE_PRESENT | XE_PAGE_RW; - pte |= pte_encode_pat_index(xe, pat_index, pt_level); + pte |= pte_encode_pat_index(pat_index, pt_level); pte |= pte_encode_ps(pt_level); if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)) @@ -1207,14 +1277,12 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma, u16 pat_index, u32 pt_level) { - struct xe_device *xe = xe_vma_vm(vma)->xe; - pte |= XE_PAGE_PRESENT; if (likely(!xe_vma_read_only(vma))) pte |= XE_PAGE_RW; - pte |= pte_encode_pat_index(xe, pat_index, pt_level); + pte |= pte_encode_pat_index(pat_index, pt_level); pte |= pte_encode_ps(pt_level); if (unlikely(xe_vma_is_null(vma))) @@ -1234,7 +1302,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr, pte = addr; pte |= XE_PAGE_PRESENT | XE_PAGE_RW; - pte |= pte_encode_pat_index(xe, pat_index, pt_level); + pte |= pte_encode_pat_index(pat_index, pt_level); pte |= pte_encode_ps(pt_level); if (devmem) @@ -1333,6 +1401,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) init_rwsem(&vm->userptr.notifier_lock); spin_lock_init(&vm->userptr.invalidated_lock); + ttm_lru_bulk_move_init(&vm->lru_bulk_move); + INIT_WORK(&vm->destroy_work, vm_destroy_work_func); INIT_LIST_HEAD(&vm->preempt.exec_queues); @@ -1412,19 +1482,13 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) /* Kernel migration VM shouldn't have a circular loop.. */ if (!(flags & XE_VM_FLAG_MIGRATION)) { for_each_tile(tile, xe, id) { - struct xe_gt *gt = tile->primary_gt; - struct xe_vm *migrate_vm; struct xe_exec_queue *q; u32 create_flags = EXEC_QUEUE_FLAG_VM; if (!vm->pt_root[id]) continue; - migrate_vm = xe_migrate_get_vm(tile->migrate); - q = xe_exec_queue_create_class(xe, gt, migrate_vm, - XE_ENGINE_CLASS_COPY, - create_flags); - xe_vm_put(migrate_vm); + q = xe_exec_queue_create_bind(xe, tile, create_flags, 0); if (IS_ERR(q)) { err = PTR_ERR(q); goto err_close; @@ -1437,13 +1501,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) if (number_tiles > 1) vm->composite_fence_ctx = dma_fence_context_alloc(1); - mutex_lock(&xe->usm.lock); - if (flags & XE_VM_FLAG_FAULT_MODE) - xe->usm.num_vm_in_fault_mode++; - else if (!(flags & XE_VM_FLAG_MIGRATION)) - xe->usm.num_vm_in_non_fault_mode++; - mutex_unlock(&xe->usm.lock); - trace_xe_vm_create(vm); return vm; @@ -1458,6 +1515,7 @@ err_no_resv: mutex_destroy(&vm->snap_mutex); for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); + ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); kfree(vm); if (flags & XE_VM_FLAG_LR_MODE) xe_pm_runtime_put(xe); @@ -1556,11 +1614,6 @@ void xe_vm_close_and_put(struct xe_vm *vm) up_write(&vm->lock); mutex_lock(&xe->usm.lock); - if (vm->flags & XE_VM_FLAG_FAULT_MODE) - xe->usm.num_vm_in_fault_mode--; - else if (!(vm->flags & XE_VM_FLAG_MIGRATION)) - xe->usm.num_vm_in_non_fault_mode--; - if (vm->usm.asid) { void *lookup; @@ -1601,6 +1654,12 @@ static void vm_destroy_work_func(struct work_struct *w) XE_WARN_ON(vm->pt_root[id]); trace_xe_vm_free(vm); + + ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); + + if (vm->xef) + xe_file_put(vm->xef); + kfree(vm); } @@ -1637,147 +1696,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) return q ? 
q : vm->q[0]; } -static struct dma_fence * -xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - bool first_op, bool last_op) -{ - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - struct xe_tile *tile; - struct dma_fence *fence = NULL; - struct dma_fence **fences = NULL; - struct dma_fence_array *cf = NULL; - int cur_fence = 0; - int number_tiles = hweight8(vma->tile_present); - int err; - u8 id; - - trace_xe_vma_unbind(vma); - - if (number_tiles > 1) { - fences = kmalloc_array(number_tiles, sizeof(*fences), - GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - } - - for_each_tile(tile, vm->xe, id) { - if (!(vma->tile_present & BIT(id))) - goto next; - - fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id], - first_op ? syncs : NULL, - first_op ? num_syncs : 0); - if (IS_ERR(fence)) { - err = PTR_ERR(fence); - goto err_fences; - } - - if (fences) - fences[cur_fence++] = fence; - -next: - if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) - q = list_next_entry(q, multi_gt_list); - } - - if (fences) { - cf = dma_fence_array_create(number_tiles, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - err = -ENOMEM; - goto err_fences; - } - } - - fence = cf ? &cf->base : !fence ? - xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence; - - return fence; - -err_fences: - if (fences) { - while (cur_fence) - dma_fence_put(fences[--cur_fence]); - kfree(fences); - } - - return ERR_PTR(err); -} - -static struct dma_fence * -xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_sync_entry *syncs, u32 num_syncs, - u8 tile_mask, bool first_op, bool last_op) -{ - struct xe_tile *tile; - struct dma_fence *fence; - struct dma_fence **fences = NULL; - struct dma_fence_array *cf = NULL; - struct xe_vm *vm = xe_vma_vm(vma); - int cur_fence = 0; - int number_tiles = hweight8(tile_mask); - int err; - u8 id; - - trace_xe_vma_bind(vma); - - if (number_tiles > 1) { - fences = kmalloc_array(number_tiles, sizeof(*fences), - GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - } - - for_each_tile(tile, vm->xe, id) { - if (!(tile_mask & BIT(id))) - goto next; - - fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id], - first_op ? syncs : NULL, - first_op ? num_syncs : 0, - vma->tile_present & BIT(id)); - if (IS_ERR(fence)) { - err = PTR_ERR(fence); - goto err_fences; - } - - if (fences) - fences[cur_fence++] = fence; - -next: - if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) - q = list_next_entry(q, multi_gt_list); - } - - if (fences) { - cf = dma_fence_array_create(number_tiles, fences, - vm->composite_fence_ctx, - vm->composite_fence_seqno++, - false); - if (!cf) { - --vm->composite_fence_seqno; - err = -ENOMEM; - goto err_fences; - } - } - - return cf ? 
&cf->base : fence; - -err_fences: - if (fences) { - while (cur_fence) - dma_fence_put(fences[--cur_fence]); - kfree(fences); - } - - return ERR_PTR(err); -} - static struct xe_user_fence * find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) { @@ -1793,48 +1711,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) return NULL; } -static struct dma_fence * -xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, - struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs, - u8 tile_mask, bool immediate, bool first_op, bool last_op) -{ - struct dma_fence *fence; - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - - xe_vm_assert_held(vm); - xe_bo_assert_held(bo); - - if (immediate) { - fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask, - first_op, last_op); - if (IS_ERR(fence)) - return fence; - } else { - xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); - - fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm); - } - - return fence; -} - -static struct dma_fence * -xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, - struct xe_exec_queue *q, struct xe_sync_entry *syncs, - u32 num_syncs, bool first_op, bool last_op) -{ - struct dma_fence *fence; - - xe_vm_assert_held(vm); - xe_bo_assert_held(xe_vma_bo(vma)); - - fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op); - if (IS_ERR(fence)) - return fence; - - return fence; -} - #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \ DRM_XE_VM_CREATE_FLAG_LR_MODE | \ DRM_XE_VM_CREATE_FLAG_FAULT_MODE) @@ -1875,14 +1751,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) return -EINVAL; - if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && - xe_device_in_non_fault_mode(xe))) - return -EINVAL; - - if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) && - xe_device_in_fault_mode(xe))) - return -EINVAL; - if (XE_IOCTL_DBG(xe, args->extensions)) return -EINVAL; @@ -1916,7 +1784,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, } args->vm_id = id; - vm->xef = xef; + vm->xef = xe_file_get(xef); /* Record BO memory for VM pagetable created against client */ for_each_tile(tile, xe, id) @@ -1975,21 +1843,6 @@ static const u32 region_to_mem_type[] = { XE_PL_VRAM1, }; -static struct dma_fence * -xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, - struct xe_exec_queue *q, struct xe_sync_entry *syncs, - u32 num_syncs, bool first_op, bool last_op) -{ - struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); - - if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) { - return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs, - vma->tile_mask, true, first_op, last_op); - } else { - return xe_exec_queue_last_fence_get(wait_exec_queue, vm); - } -} - static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit) { @@ -2277,14 +2130,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) return err; } - -static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, - struct drm_gpuva_ops *ops, - struct xe_sync_entry *syncs, u32 num_syncs, - struct xe_vma_ops *vops, bool last) +static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, + struct xe_vma_ops *vops) { struct xe_device *xe = vm->xe; - struct xe_vma_op *last_op = NULL; struct drm_gpuva_op *__op; struct xe_tile *tile; u8 id, tile_mask = 0; @@ -2298,19 +2147,10 @@ static int 
vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, drm_gpuva_for_each_op(__op, ops) { struct xe_vma_op *op = gpuva_op_to_vma_op(__op); struct xe_vma *vma; - bool first = list_empty(&vops->list); unsigned int flags = 0; INIT_LIST_HEAD(&op->link); list_add_tail(&op->link, &vops->list); - - if (first) { - op->flags |= XE_VMA_OP_FIRST; - op->num_syncs = num_syncs; - op->syncs = syncs; - } - - op->q = q; op->tile_mask = tile_mask; switch (op->base.op) { @@ -2329,6 +2169,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, return PTR_ERR(vma); op->map.vma = vma; + if (op->map.immediate || !xe_vm_in_fault_mode(vm)) + xe_vma_ops_incr_pt_update_ops(vops, + op->tile_mask); break; } case DRM_GPUVA_OP_REMAP: @@ -2373,6 +2216,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx", (ULL)op->remap.start, (ULL)op->remap.range); + } else { + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); } } @@ -2409,203 +2254,30 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx", (ULL)op->remap.start, (ULL)op->remap.range); + } else { + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); } } + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; } case DRM_GPUVA_OP_UNMAP: case DRM_GPUVA_OP_PREFETCH: - /* Nothing to do */ + /* FIXME: Need to skip some prefetch ops */ + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; default: drm_warn(&vm->xe->drm, "NOT POSSIBLE"); } - last_op = op; - err = xe_vma_op_commit(vm, op); if (err) return err; } - /* FIXME: Unhandled corner case */ - XE_WARN_ON(!last_op && last && !list_empty(&vops->list)); - - if (!last_op) - return 0; - - if (last) { - last_op->flags |= XE_VMA_OP_LAST; - last_op->num_syncs = num_syncs; - last_op->syncs = syncs; - } - return 0; } -static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma, - struct xe_vma_op *op) -{ - struct dma_fence *fence = NULL; - - lockdep_assert_held(&vm->lock); - - xe_vm_assert_held(vm); - xe_bo_assert_held(xe_vma_bo(vma)); - - switch (op->base.op) { - case DRM_GPUVA_OP_MAP: - fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma), - op->syncs, op->num_syncs, - op->tile_mask, - op->map.immediate || !xe_vm_in_fault_mode(vm), - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - case DRM_GPUVA_OP_REMAP: - { - bool prev = !!op->remap.prev; - bool next = !!op->remap.next; - - if (!op->remap.unmap_done) { - if (prev || next) - vma->gpuva.flags |= XE_VMA_FIRST_REBIND; - fence = xe_vm_unbind(vm, vma, op->q, op->syncs, - op->num_syncs, - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST && - !prev && !next); - if (IS_ERR(fence)) - break; - op->remap.unmap_done = true; - } - - if (prev) { - op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND; - dma_fence_put(fence); - fence = xe_vm_bind(vm, op->remap.prev, op->q, - xe_vma_bo(op->remap.prev), op->syncs, - op->num_syncs, - op->remap.prev->tile_mask, true, - false, - op->flags & XE_VMA_OP_LAST && !next); - op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND; - if (IS_ERR(fence)) - break; - op->remap.prev = NULL; - } - - if (next) { - op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND; - dma_fence_put(fence); - fence = xe_vm_bind(vm, op->remap.next, op->q, - xe_vma_bo(op->remap.next), - op->syncs, op->num_syncs, - op->remap.next->tile_mask, true, - false, op->flags & XE_VMA_OP_LAST); - op->remap.next->gpuva.flags 
&= ~XE_VMA_LAST_REBIND; - if (IS_ERR(fence)) - break; - op->remap.next = NULL; - } - - break; - } - case DRM_GPUVA_OP_UNMAP: - fence = xe_vm_unbind(vm, vma, op->q, op->syncs, - op->num_syncs, op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - case DRM_GPUVA_OP_PREFETCH: - fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs, - op->flags & XE_VMA_OP_FIRST, - op->flags & XE_VMA_OP_LAST); - break; - default: - drm_warn(&vm->xe->drm, "NOT POSSIBLE"); - } - - if (IS_ERR(fence)) - trace_xe_vma_fail(vma); - - return fence; -} - -static struct dma_fence * -__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, - struct xe_vma_op *op) -{ - struct dma_fence *fence; - int err; - -retry_userptr: - fence = op_execute(vm, vma, op); - if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) { - lockdep_assert_held_write(&vm->lock); - - if (op->base.op == DRM_GPUVA_OP_REMAP) { - if (!op->remap.unmap_done) - vma = gpuva_to_vma(op->base.remap.unmap->va); - else if (op->remap.prev) - vma = op->remap.prev; - else - vma = op->remap.next; - } - - if (xe_vma_is_userptr(vma)) { - err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); - if (!err) - goto retry_userptr; - - fence = ERR_PTR(err); - trace_xe_vma_fail(vma); - } - } - - return fence; -} - -static struct dma_fence * -xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) -{ - struct dma_fence *fence = ERR_PTR(-ENOMEM); - - lockdep_assert_held(&vm->lock); - - switch (op->base.op) { - case DRM_GPUVA_OP_MAP: - fence = __xe_vma_op_execute(vm, op->map.vma, op); - break; - case DRM_GPUVA_OP_REMAP: - { - struct xe_vma *vma; - - if (!op->remap.unmap_done) - vma = gpuva_to_vma(op->base.remap.unmap->va); - else if (op->remap.prev) - vma = op->remap.prev; - else - vma = op->remap.next; - - fence = __xe_vma_op_execute(vm, vma, op); - break; - } - case DRM_GPUVA_OP_UNMAP: - fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va), - op); - break; - case DRM_GPUVA_OP_PREFETCH: - fence = __xe_vma_op_execute(vm, - gpuva_to_vma(op->base.prefetch.va), - op); - break; - default: - drm_warn(&vm->xe->drm, "NOT POSSIBLE"); - } - - return fence; -} - static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, bool post_commit, bool prev_post_commit, bool next_post_commit) @@ -2788,26 +2460,157 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec, return err; } +#ifdef TEST_VM_OPS_ERROR + if (vops->inject_error && + vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) + return -ENOSPC; +#endif + return 0; } +static void op_trace(struct xe_vma_op *op) +{ + switch (op->base.op) { + case DRM_GPUVA_OP_MAP: + trace_xe_vma_bind(op->map.vma); + break; + case DRM_GPUVA_OP_REMAP: + trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va)); + if (op->remap.prev) + trace_xe_vma_bind(op->remap.prev); + if (op->remap.next) + trace_xe_vma_bind(op->remap.next); + break; + case DRM_GPUVA_OP_UNMAP: + trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va)); + break; + case DRM_GPUVA_OP_PREFETCH: + trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va)); + break; + default: + XE_WARN_ON("NOT POSSIBLE"); + } +} + +static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops) +{ + struct xe_vma_op *op; + + list_for_each_entry(op, &vops->list, link) + op_trace(op); +} + +static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) +{ + struct xe_exec_queue *q = vops->q; + struct xe_tile *tile; + int number_tiles = 0; + u8 id; + + for_each_tile(tile, vm->xe, id) { + if (vops->pt_update_ops[id].num_ops) + ++number_tiles; + + if 
(vops->pt_update_ops[id].q) + continue; + + if (q) { + vops->pt_update_ops[id].q = q; + if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) + q = list_next_entry(q, multi_gt_list); + } else { + vops->pt_update_ops[id].q = vm->q[id]; + } + } + + return number_tiles; +} + static struct dma_fence *ops_execute(struct xe_vm *vm, struct xe_vma_ops *vops) { - struct xe_vma_op *op, *next; + struct xe_tile *tile; struct dma_fence *fence = NULL; + struct dma_fence **fences = NULL; + struct dma_fence_array *cf = NULL; + int number_tiles = 0, current_fence = 0, err; + u8 id; - list_for_each_entry_safe(op, next, &vops->list, link) { - dma_fence_put(fence); - fence = xe_vma_op_execute(vm, op); - if (IS_ERR(fence)) { - drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld", - op->base.op, PTR_ERR(fence)); - fence = ERR_PTR(-ENOSPC); - break; + number_tiles = vm_ops_setup_tile_args(vm, vops); + if (number_tiles == 0) + return ERR_PTR(-ENODATA); + + if (number_tiles > 1) { + fences = kmalloc_array(number_tiles, sizeof(*fences), + GFP_KERNEL); + if (!fences) { + fence = ERR_PTR(-ENOMEM); + goto err_trace; } } + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + err = xe_pt_update_ops_prepare(tile, vops); + if (err) { + fence = ERR_PTR(err); + goto err_out; + } + } + + trace_xe_vm_ops_execute(vops); + + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + fence = xe_pt_update_ops_run(tile, vops); + if (IS_ERR(fence)) + goto err_out; + + if (fences) + fences[current_fence++] = fence; + } + + if (fences) { + cf = dma_fence_array_create(number_tiles, fences, + vm->composite_fence_ctx, + vm->composite_fence_seqno++, + false); + if (!cf) { + --vm->composite_fence_seqno; + fence = ERR_PTR(-ENOMEM); + goto err_out; + } + fence = &cf->base; + } + + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + xe_pt_update_ops_fini(tile, vops); + } + + return fence; + +err_out: + for_each_tile(tile, vm->xe, id) { + if (!vops->pt_update_ops[id].num_ops) + continue; + + xe_pt_update_ops_abort(tile, vops); + } + while (current_fence) + dma_fence_put(fences[--current_fence]); + kfree(fences); + kfree(cf); + +err_trace: + trace_xe_vm_ops_fail(vm); return fence; } @@ -2888,12 +2691,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, fence = ops_execute(vm, vops); if (IS_ERR(fence)) { err = PTR_ERR(fence); - /* FIXME: Killing VM rather than proper error handling */ - xe_vm_kill(vm, false); goto unlock; - } else { - vm_bind_ioctl_ops_fini(vm, vops, fence); } + + vm_bind_ioctl_ops_fini(vm, vops, fence); } unlock: @@ -2901,11 +2702,18 @@ unlock: return err; } -#define SUPPORTED_FLAGS \ +#define SUPPORTED_FLAGS_STUB \ (DRM_XE_VM_BIND_FLAG_READONLY | \ DRM_XE_VM_BIND_FLAG_IMMEDIATE | \ DRM_XE_VM_BIND_FLAG_NULL | \ DRM_XE_VM_BIND_FLAG_DUMPABLE) + +#ifdef TEST_VM_OPS_ERROR +#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR) +#else +#define SUPPORTED_FLAGS SUPPORTED_FLAGS_STUB +#endif + #define XE_64K_PAGE_MASK 0xffffull #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) @@ -2931,7 +2739,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, sizeof(struct drm_xe_vm_bind_op), GFP_KERNEL | __GFP_ACCOUNT); if (!*bind_ops) - return -ENOMEM; + return args->num_binds > 1 ? 
-ENOBUFS : -ENOMEM; err = __copy_from_user(*bind_ops, bind_user, sizeof(struct drm_xe_vm_bind_op) * @@ -3070,7 +2878,16 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, return -EINVAL; } - if (bo->flags & XE_BO_FLAG_INTERNAL_64K) { + /* + * Some platforms require 64k VM_BIND alignment, + * specifically those with XE_VRAM_FLAGS_NEED64K. + * + * Other platforms may have BO's set to 64k physical placement, + * but can be mapped at 4k offsets anyway. This check is only + * there for the former case. + */ + if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) && + (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) { if (XE_IOCTL_DBG(xe, obj_offset & XE_64K_PAGE_MASK) || XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) || @@ -3250,10 +3067,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto unwind_ops; } - err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs, - &vops, i == args->num_binds - 1); + err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); if (err) goto unwind_ops; + +#ifdef TEST_VM_OPS_ERROR + if (flags & FORCE_OP_ERROR) { + vops.inject_error = true; + vm->xe->vm_inject_error_position = + (vm->xe->vm_inject_error_position + 1) % + FORCE_OP_ERROR_COUNT; + } +#endif } /* Nothing to do */ @@ -3262,11 +3087,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto unwind_ops; } + err = xe_vma_ops_alloc(&vops, args->num_binds > 1); + if (err) + goto unwind_ops; + err = vm_bind_ioctl_ops_execute(vm, &vops); unwind_ops: if (err && err != -ENODATA) vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); + xe_vma_ops_fini(&vops); for (i = args->num_binds - 1; i >= 0; --i) if (ops[i]) drm_gpuva_ops_free(&vm->gpuvm, ops[i]); @@ -3337,10 +3167,11 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) { struct xe_device *xe = xe_vma_vm(vma)->xe; struct xe_tile *tile; - u32 tile_needs_invalidate = 0; - int seqno[XE_MAX_TILES_PER_DEVICE]; + struct xe_gt_tlb_invalidation_fence + fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE]; u8 id; - int ret; + u32 fence_id = 0; + int ret = 0; xe_assert(xe, !xe_vma_is_null(vma)); trace_xe_vma_invalidate(vma); @@ -3365,29 +3196,43 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) for_each_tile(tile, xe, id) { if (xe_pt_zap_ptes(tile, vma)) { - tile_needs_invalidate |= BIT(id); xe_device_wmb(xe); - /* - * FIXME: We potentially need to invalidate multiple - * GTs within the tile - */ - seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma); - if (seqno[id] < 0) - return seqno[id]; - } - } + xe_gt_tlb_invalidation_fence_init(tile->primary_gt, + &fence[fence_id], + true); + + ret = xe_gt_tlb_invalidation_vma(tile->primary_gt, + &fence[fence_id], vma); + if (ret < 0) { + xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]); + goto wait; + } + ++fence_id; - for_each_tile(tile, xe, id) { - if (tile_needs_invalidate & BIT(id)) { - ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]); - if (ret < 0) - return ret; + if (!tile->media_gt) + continue; + + xe_gt_tlb_invalidation_fence_init(tile->media_gt, + &fence[fence_id], + true); + + ret = xe_gt_tlb_invalidation_vma(tile->media_gt, + &fence[fence_id], vma); + if (ret < 0) { + xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]); + goto wait; + } + ++fence_id; } } +wait: + for (id = 0; id < fence_id; ++id) + xe_gt_tlb_invalidation_fence_wait(&fence[id]); + vma->tile_invalidated = vma->tile_mask; - return 0; + return ret; } struct xe_vm_snapshot { |
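The final hunk reworks xe_vm_invalidate_vma() from seqno-based waits into per-GT invalidation fences that also cover each tile's media GT. A rough model of the resulting issue-all-then-wait-all flow, using hypothetical stand-in helpers rather than the driver's xe_gt_tlb_invalidation_*() API:

```c
/*
 * Rough model (stand-in types and helpers, not the driver's API) of the
 * reworked xe_vm_invalidate_vma() flow: issue a TLB invalidation on
 * every GT whose PTEs were zapped, keep one fence per issued request,
 * then wait on all of them, even if a later submission failed.
 */
#include <stdbool.h>

#define MAX_INV_FENCES 8		/* tiles x GTs per tile, illustrative */

struct inv_fence { bool in_flight; };

static int issue_tlb_inval(struct inv_fence *f, int gt_id)
{
	f->in_flight = true;		/* real code sends an invalidation request */
	return gt_id >= 0 ? 0 : -1;	/* <0: request could not be submitted */
}

static void wait_tlb_inval(struct inv_fence *f)
{
	f->in_flight = false;		/* real code waits on a dma-fence */
}

static int invalidate_vma_model(const int *gt_ids, int num_gts)
{
	struct inv_fence fence[MAX_INV_FENCES];
	int fence_id = 0, ret = 0, i;

	for (i = 0; i < num_gts && fence_id < MAX_INV_FENCES; ++i) {
		ret = issue_tlb_inval(&fence[fence_id], gt_ids[i]);
		if (ret < 0)
			break;			/* mirrors the "goto wait" path */
		++fence_id;
	}

	/* Wait on everything already issued before returning any error. */
	for (i = 0; i < fence_id; ++i)
		wait_tlb_inval(&fence[i]);

	return ret;
}
```

Compared with the removed code, which returned immediately when an invalidation could not be issued, this flow always waits on the invalidations already in flight before propagating the error, and it addresses the earlier "FIXME: We potentially need to invalidate multiple GTs within the tile" by fencing the media GT as well as the primary GT.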