Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')

 drivers/gpu/drm/xe/xe_vm.c | 48 ++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 872de052d670..a4845d4213b0 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -8,6 +8,7 @@
 #include <linux/dma-fence-array.h>
 #include <linux/nospec.h>
 
+#include <drm/drm_drv.h>
 #include <drm/drm_exec.h>
 #include <drm/drm_print.h>
 #include <drm/ttm/ttm_execbuf_util.h>
@@ -1401,8 +1402,12 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
 
 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
-		if (IS_ERR(vm->scratch_pt[id][i]))
-			return PTR_ERR(vm->scratch_pt[id][i]);
+		if (IS_ERR(vm->scratch_pt[id][i])) {
+			int err = PTR_ERR(vm->scratch_pt[id][i]);
+
+			vm->scratch_pt[id][i] = NULL;
+			return err;
+		}
 
 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
 	}
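The hunk above fixes an error path: xe_pt_create() returns an ERR_PTR-encoded value on failure, and leaving that value in vm->scratch_pt[][] would let later teardown code mistake it for a valid page table. Below is a minimal userspace sketch of the ERR_PTR/IS_ERR convention and the reset-to-NULL pattern; the pt_create()/table[] names are illustrative stand-ins, not part of the driver:

	#include <stdio.h>

	/* Simplified userspace model of the kernel's ERR_PTR/IS_ERR helpers:
	 * errors are encoded as addresses in the top page of the address space. */
	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)	{ return (void *)error; }
	static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	struct pt { int level; };

	/* Always-failing allocator standing in for xe_pt_create(). */
	static struct pt *pt_create(int level)
	{
		(void)level;
		return ERR_PTR(-12 /* -ENOMEM */);
	}

	int main(void)
	{
		struct pt *table[4] = { NULL };
		int i;

		for (i = 0; i < 4; i++) {
			table[i] = pt_create(i);
			if (IS_ERR(table[i])) {
				long err = PTR_ERR(table[i]);

				/* Without this reset, any later cleanup loop that
				 * checks "if (table[i])" would dereference the
				 * ERR_PTR-encoded value as a real table. */
				table[i] = NULL;
				printf("level %d failed: %ld\n", i, err);
				break;
			}
		}
		return 0;
	}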
@@ -1476,8 +1481,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	 * scheduler drops all the references of it, hence protecting the VM
 	 * for this case is necessary.
 	 */
-	if (flags & XE_VM_FLAG_LR_MODE)
+	if (flags & XE_VM_FLAG_LR_MODE) {
+		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
 		xe_pm_runtime_get_noresume(xe);
+	}
 
 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
 	if (!vm_resv_obj) {
@@ -1522,10 +1529,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 		vm->batch_invalidate_tlb = true;
 	}
 
-	if (vm->flags & XE_VM_FLAG_LR_MODE) {
-		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+	if (vm->flags & XE_VM_FLAG_LR_MODE)
 		vm->batch_invalidate_tlb = false;
-	}
 
 	/* Fill pt_root after allocating scratch tables */
 	for_each_tile(tile, xe, id) {
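The two hunks above move INIT_WORK() from the late LR-mode block to the early one, so the rebind work item is initialized before the first error exit in xe_vm_create(); presumably an unwind path can flush or cancel that work, which is only well-defined on an initialized work_struct. A minimal sketch of the general rule, initialize before the first error exit that unwinds through the resource; vm_create()/cancel_work() here are stand-ins, not the driver's functions:

	#include <stdio.h>
	#include <stdlib.h>

	struct work { int initialized; };

	static void init_work(struct work *w)	{ w->initialized = 1; }
	static void cancel_work(struct work *w)
	{
		/* Cancelling an uninitialized work item is undefined behavior
		 * in the kernel; here we just make the precondition explicit. */
		if (!w->initialized)
			abort();
	}

	struct vm {
		struct work rebind_work;
		void *resv;
	};

	static int vm_create(struct vm *vm, int fail_alloc)
	{
		/* Initialize the work item *before* the first error exit that
		 * unwinds through cancel_work(), mirroring the move of
		 * INIT_WORK() next to xe_pm_runtime_get_noresume(). */
		init_work(&vm->rebind_work);

		vm->resv = fail_alloc ? NULL : malloc(16);
		if (!vm->resv)
			goto err;

		return 0;

	err:
		cancel_work(&vm->rebind_work);	/* safe: already initialized */
		return -1;
	}

	int main(void)
	{
		struct vm vm = { 0 };

		printf("create: %d\n", vm_create(&vm, 1));
		free(vm.resv);
		return 0;
	}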
@@ -1581,9 +1586,40 @@ err_no_resv:
 
 static void xe_vm_close(struct xe_vm *vm)
 {
+	struct xe_device *xe = vm->xe;
+	bool bound;
+	int idx;
+
+	bound = drm_dev_enter(&xe->drm, &idx);
+
 	down_write(&vm->lock);
+
 	vm->size = 0;
+
+	if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
+		struct xe_tile *tile;
+		struct xe_gt *gt;
+		u8 id;
+
+		/* Wait for pending binds */
+		dma_resv_wait_timeout(xe_vm_resv(vm),
+				      DMA_RESV_USAGE_BOOKKEEP,
+				      false, MAX_SCHEDULE_TIMEOUT);
+
+		if (bound) {
+			for_each_tile(tile, xe, id)
+				if (vm->pt_root[id])
+					xe_pt_clear(xe, vm->pt_root[id]);
+
+			for_each_gt(gt, xe, id)
+				xe_gt_tlb_invalidation_vm(gt, vm);
+		}
+	}
+
 	up_write(&vm->lock);
+
+	if (bound)
+		drm_dev_exit(idx);
 }
 
 void xe_vm_close_and_put(struct xe_vm *vm)
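The new xe_vm_close() brackets its hardware teardown with drm_dev_enter()/drm_dev_exit(): software state is torn down unconditionally, but the page-table clear and TLB invalidations only run while the device is still present. A minimal userspace model of that pattern (in the kernel, drm_dev_enter() takes an SRCU read lock that drm_dev_unplug() synchronizes against; the names below are illustrative, not the DRM API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Userspace model of the drm_dev_enter()/drm_dev_exit() pattern:
	 * hardware may only be touched while the device is still present,
	 * and the enter/exit section tells the unplug path to wait. */

	static bool device_present = true;

	static bool dev_enter(int *idx)
	{
		*idx = 0;		/* stands in for the SRCU read-lock index */
		return device_present;
	}

	static void dev_exit(int idx)
	{
		(void)idx;		/* stands in for srcu_read_unlock() */
	}

	static void clear_page_tables(void)	{ puts("MMIO: clear PTEs"); }
	static void invalidate_tlbs(void)	{ puts("MMIO: invalidate TLBs"); }

	static void vm_close(void)
	{
		int idx;
		bool bound = dev_enter(&idx);

		/* Software state is torn down unconditionally... */
		puts("vm->size = 0");

		/* ...but the hardware is only touched if it is still there. */
		if (bound) {
			clear_page_tables();
			invalidate_tlbs();
			dev_exit(idx);
		}
	}

	int main(void)
	{
		vm_close();		/* device present: full teardown */

		device_present = false;
		vm_close();		/* after unplug: skip MMIO */
		return 0;
	}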