author     Zhenyu Wang <zhenyuw@linux.intel.com>  2018-03-19 12:09:05 +0300
committer  Zhenyu Wang <zhenyuw@linux.intel.com>  2018-03-19 12:33:30 +0300
commit     d8303075699292008ae5b2c8fc728d455b994c26 (patch)
tree       d47854c493dd0585ff8dd696b31939792c10a206 /drivers/gpu/drm/i915
parent     b20c0d5ce1047ba03a6709a07f31f4d7178de35c (diff)
download   linux-d8303075699292008ae5b2c8fc728d455b994c26.tar.xz
drm/i915/gvt: force to set all context control bits from guest
Our shadow context content comes from the guest, but for a masked control register like CTX_CONTEXT_CONTROL we need to make sure every setting from the guest takes effect when the context runs on hardware. This change forces all the mask-enable bits on, so that every bit the guest set is actually applied on hardware.

One regression was traced to the inhibit bit: once it is set, the GPU engine stays in the inhibit state until an MI_LOAD_REG_IMM command or a context image clears it, i.e. writes the bit with its mask bit set to 1 and its value bit set to 0. In GVT-g the workload currently has the highest priority, so a GVT-g workload can easily trigger the preempt context, which sets the inhibit bit. When the GVT-g workload is then scheduled in, its shadow context image usually does not set the inhibit mask bit, so the GPU stays in the inhibit state while the GVT-g workload runs. This caused a GPU hang.

Suggested-by: Zhang, Xiong <xiong.y.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Zhang, Xiong <xiong.y.zhang@intel.com>
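For context on why ORing 0xffff << 16 into the shadow value works: hardware masked registers such as CTX_CONTEXT_CONTROL carry a write-enable mask in their upper 16 bits, and a write only changes low bit n when mask bit n+16 is set in the written value; writes with the mask bit clear are silently ignored. Below is a minimal user-space C sketch of that semantics (the masked_reg_write helper is illustrative, not part of the i915 sources):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Masked-register write semantics: low bit n changes only when mask
     * bit (n + 16) is set in the written value.  Illustrative helper,
     * not from the i915 sources.
     */
    static uint32_t masked_reg_write(uint32_t old, uint32_t write)
    {
            uint32_t mask = write >> 16;     /* which low bits may change */
            uint32_t val  = write & 0xffff;  /* new values for those bits */

            return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint32_t reg = 0;

            /* Guest sets bit 0 but not its mask bit: the write is ignored. */
            reg = masked_reg_write(reg, 0x0001);
            printf("without mask bit: %#x\n", reg);  /* still 0x0 */

            /* Forcing all mask bits on, as the patch's "|= 0xffff << 16"
             * does, makes every guest bit take effect. */
            reg = masked_reg_write(reg, (0xffffu << 16) | 0x0001);
            printf("with mask bits:   %#x\n", reg);  /* now 0x1 */

            return 0;
    }

With every mask bit forced on, whatever the guest wrote into the low 16 bits, including a cleared inhibit bit, is guaranteed to land in the hardware register.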
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1127bd77fc6e..a55b4975c154 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -124,8 +124,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+#define COPY_REG_MASKED(name) {\
+       intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+               + RING_CTX_OFF(name.val),\
+               &shadow_ring_context->name.val, 4);\
+       shadow_ring_context->name.val |= 0xffff << 16;\
+       }
 
-       COPY_REG(ctx_ctrl);
+       COPY_REG_MASKED(ctx_ctrl);
        COPY_REG(ctx_timestamp);
 
        if (ring_id == RCS) {
@@ -134,6 +140,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                COPY_REG(rcs_indirect_ctx_offset);
        }
 #undef COPY_REG
+#undef COPY_REG_MASKED
 
        intel_gvt_hypervisor_read_gpa(vgpu,
                workload->ring_context_gpa +