Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/scheduler.c | 60
1 file changed, 30 insertions, 30 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..9b92b4e25a20 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -113,7 +113,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #undef COPY_REG
 
         set_context_pdp_root_pointer(shadow_ring_context,
-                        workload->shadow_mm->shadow_page_table);
+                        (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
 
         intel_gvt_hypervisor_read_gpa(vgpu,
                         workload->ring_context_gpa +
@@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
         return 0;
 }
 
-static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+static inline bool is_gvt_request(struct i915_request *req)
 {
         return i915_gem_context_force_single_submission(req->ctx);
 }
@@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
 static int shadow_context_status_change(struct notifier_block *nb,
                 unsigned long action, void *data)
 {
-        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+        struct i915_request *req = data;
         struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                 shadow_ctx_notifier_block[req->engine->id]);
         struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -225,6 +225,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
         struct intel_vgpu *vgpu = workload->vgpu;
         void *shadow_ring_buffer_va;
         u32 *cs;
+        struct i915_request *req = workload->req;
+
+        if (IS_KABYLAKE(req->i915) &&
+            is_inhibit_context(req->ctx, req->engine->id))
+                intel_vgpu_restore_inhibit_context(vgpu, req);
 
         /* allocate shadow ring buffer */
         cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
@@ -333,13 +338,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
         int ring_id = workload->ring_id;
         struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
         struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-        struct drm_i915_gem_request *rq;
+        struct i915_request *rq;
         struct intel_vgpu *vgpu = workload->vgpu;
         struct intel_vgpu_submission *s = &vgpu->submission;
         struct i915_gem_context *shadow_ctx = s->shadow_ctx;
         int ret;
 
-        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+        rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
         if (IS_ERR(rq)) {
                 gvt_vgpu_err("fail to allocate gem request\n");
                 ret = PTR_ERR(rq);
@@ -348,7 +353,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 
         gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
 
-        workload->req = i915_gem_request_get(rq);
+        workload->req = i915_request_get(rq);
         ret = copy_workload_to_ring_buffer(workload);
         if (ret)
                 goto err_unpin;
@@ -582,7 +587,7 @@ out:
         if (!IS_ERR_OR_NULL(workload->req)) {
                 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                               ring_id, workload->req);
-                i915_add_request(workload->req);
+                i915_request_add(workload->req);
                 workload->dispatched = true;
         }
@@ -769,7 +774,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                 workload->status = 0;
         }
 
-        i915_gem_request_put(fetch_and_zero(&workload->req));
+        i915_request_put(fetch_and_zero(&workload->req));
 
         if (!workload->status && !(vgpu->resetting_eng &
                                    ENGINE_MASK(ring_id))) {
@@ -886,7 +891,7 @@ static int workload_thread(void *priv)
                 gvt_dbg_sched("ring id %d wait workload %p\n",
                                 workload->ring_id, workload);
-                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
+                i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 complete:
                 gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -1132,7 +1137,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
         struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
         if (workload->shadow_mm)
-                intel_gvt_mm_unreference(workload->shadow_mm);
+                intel_vgpu_mm_put(workload->shadow_mm);
 
         kmem_cache_free(s->workloads, workload);
 }
@@ -1181,32 +1186,27 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
         struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
         struct intel_vgpu_mm *mm;
         struct intel_vgpu *vgpu = workload->vgpu;
-        int page_table_level;
-        u32 pdp[8];
-
-        if (desc->addressing_mode == 1) { /* legacy 32-bit */
-                page_table_level = 3;
-        } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
-                page_table_level = 4;
-        } else {
+        intel_gvt_gtt_type_t root_entry_type;
+        u64 pdps[GVT_RING_CTX_NR_PDPS];
+
+        switch (desc->addressing_mode) {
+        case 1: /* legacy 32-bit */
+                root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+                break;
+        case 3: /* legacy 64-bit */
+                root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+                break;
+        default:
                 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                 return -EINVAL;
         }
 
-        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
 
-        mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
-        if (mm) {
-                intel_gvt_mm_reference(mm);
-        } else {
+        mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
+        if (IS_ERR(mm))
+                return PTR_ERR(mm);
-
-                mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
-                                pdp, page_table_level, 0);
-                if (IS_ERR(mm)) {
-                        gvt_vgpu_err("fail to create mm object.\n");
-                        return PTR_ERR(mm);
-                }
-        }
         workload->shadow_mm = mm;
         return 0;
 }
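
For context, a minimal sketch (not part of the patch) of the request lifecycle as scheduler.c exercises it after the rename from the drm_i915_gem_request API to the i915_request API. The wrapper name gvt_submit_and_wait() and its error handling are hypothetical; only the i915_request_*() calls shown in the diff above are taken from the source.

/*
 * Minimal sketch, assuming the i915-internal API of this kernel version:
 * allocate a request on an engine, take a reference, submit it, wait for
 * completion, then drop the reference. gvt_submit_and_wait() is a
 * hypothetical helper used only for illustration.
 */
#include <linux/err.h>          /* IS_ERR(), PTR_ERR() */
#include <linux/sched.h>        /* MAX_SCHEDULE_TIMEOUT */
#include "i915_drv.h"           /* i915 internal types and i915_request_*() */

static int gvt_submit_and_wait(struct intel_engine_cs *engine,
                               struct i915_gem_context *shadow_ctx)
{
        struct i915_request *rq;

        rq = i915_request_alloc(engine, shadow_ctx);    /* was i915_gem_request_alloc() */
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        rq = i915_request_get(rq);                      /* was i915_gem_request_get() */
        /* ... commands would be emitted into the request's ring here ... */
        i915_request_add(rq);                           /* was i915_add_request() */
        i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); /* was i915_wait_request() */
        i915_request_put(rq);                           /* was i915_gem_request_put() */

        return 0;
}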