author      Weinan Li <weinan.z.li@intel.com>        2018-01-26 10:09:08 +0300
committer   Rodrigo Vivi <rodrigo.vivi@intel.com>    2018-02-06 22:41:16 +0300
commit      9212b13f28374815d9def65e3c877a35092e1c6e (patch)
tree        8836ab2bd9a7be81aeb8fc859ea5d68d6cf2dc9d /drivers
parent      7569a06dc80ec05c96783f541fa706ea3bebec79 (diff)
download    linux-9212b13f28374815d9def65e3c877a35092e1c6e.tar.xz
drm/i915/gvt: only reset execlist state of one engine during VM engine reset
Only reset the vgpu execlist state of the exact engine which gets the
reset request from the VM. With reading context status from HWSP
enabled, KMD uses the saved CSB read pointer instead of always reading
it from MMIO. When one engine reset happens, only the read pointer of
that engine is reset, so the GVT-g host side needs to align with this
policy as well; otherwise the VM may get a wrong CSB status after an
engine reset has completed.
v2: Split refine and fix into separate patches, code refinement (Zhenyu)
v3: Move the active flag of the vgpu scheduler into sched_data (Zhenyu)
Cc: Fred Gao <fred.gao@intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Weinan Li <weinan.z.li@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      |  8
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c     | 12
3 files changed, 20 insertions(+), 14 deletions(-)
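
The core change in the diff below is that ring_mode_mmio_write now passes ENGINE_MASK(ring_id) rather than ALL_ENGINES to intel_vgpu_select_submission_ops, so only the engine that received the reset request has its execlist state cleaned. As a rough, stand-alone sketch of that engine-mask idea (not GVT-g code; NUM_ENGINES, ENGINE_BIT, ALL_ENGINE_BITS and toy_clean_engines are made-up names for illustration), a per-engine bitmask limits the cleanup to the selected engine while the other engines keep their saved CSB read pointers:

/* Stand-alone sketch of the engine-mask idea; not GVT-g code. */
#include <stdio.h>

#define NUM_ENGINES	4
#define ENGINE_BIT(id)	(1u << (id))			/* stands in for ENGINE_MASK(ring_id) */
#define ALL_ENGINE_BITS	(ENGINE_BIT(NUM_ENGINES) - 1)	/* stands in for ALL_ENGINES */

static unsigned int csb_read_ptr[NUM_ENGINES] = { 5, 1, 3, 7 };

/* Clean execlist state only for the engines named in the mask. */
static void toy_clean_engines(unsigned int engine_mask)
{
	for (int id = 0; id < NUM_ENGINES; id++)
		if (engine_mask & ENGINE_BIT(id))
			csb_read_ptr[id] = 0;
}

int main(void)
{
	/*
	 * Per-engine reset: only engine 2 is cleaned. The old behaviour
	 * would correspond to toy_clean_engines(ALL_ENGINE_BITS), wiping
	 * every engine's state on a single-engine reset.
	 */
	toy_clean_engines(ENGINE_BIT(2));

	for (int id = 0; id < NUM_ENGINES; id++)
		printf("engine %d: csb_read_ptr = %u\n", id, csb_read_ptr[id]);

	return 0;
}

Running this prints a zeroed pointer for engine 2 while engines 0, 1 and 3 keep theirs, which is the behaviour the guest expects once it reads context status from the HWSP copy instead of MMIO.
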
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index befda75601ff..9be639aa3b55 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
@@ -1523,12 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	if (!enable_execlist)
 		return 0;
 
-	if (s->active)
-		return 0;
-
 	ret = intel_vgpu_select_submission_ops(vgpu,
-			ALL_ENGINES,
-			INTEL_VGPU_EXECLIST_SUBMISSION);
+			ENGINE_MASK(ring_id),
+			INTEL_VGPU_EXECLIST_SUBMISSION);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d031f6486ce3..cc1ce361cd76 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
+	bool active;
 
 	ktime_t sched_in_time;
 	ktime_t sched_out_time;
@@ -332,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
 			sched_data->period), HRTIMER_MODE_ABS);
+	vgpu_data->active = true;
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -339,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	list_del_init(&vgpu_data->lru_list);
+	vgpu_data->active = false;
 }
 
 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -374,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
-	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	if (!vgpu_data->active) {
+		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	}
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -389,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
 	int ring_id;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+	if (!vgpu_data->active)
+		return;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f0997a2f0513..b55b3580ca1d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1092,17 +1092,17 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;
 
-	if (s->active) {
+	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+		return -EINVAL;
+
+	if (s->active)
 		s->ops->clean(vgpu, engine_mask);
-		s->active = false;
-		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
-			vgpu->id, s->ops->name);
-	}
 
 	if (interface == 0) {
 		s->ops = NULL;
 		s->virtual_submission_interface = 0;
-		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
 		return 0;
 	}