author    Chuanxiao Dong <chuanxiao.dong@intel.com>	2017-03-16 04:47:58 +0300
committer Zhenyu Wang <zhenyuw@linux.intel.com>	2017-03-17 11:46:45 +0300
commit    3cd23b828b37bd5ccf4b40132f12dbf20d7afdb6 (patch)
tree      25be3c5a6410475b78bca0c148395fe0254a73b9 /drivers/gpu/drm/i915
parent    17f1b1a6d4e5f41df161eb2a90af22c003a243fc (diff)
drm/i915/gvt: GVT pin/unpin shadow context
When handling a guest request, GVT needs to populate/update shadow_ctx with the guest context. This requires the shadow_ctx to be pinned. The current implementation relies on the i915 request allocation to pin it, but that cannot guarantee that i915 will not unpin the shadow_ctx while GVT is still updating the guest context from it. So GVT should pin/unpin the shadow_ctx by itself.

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
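To make the lifecycle described above easier to follow outside the diff context, here is a minimal, compilable C sketch of the pin/unpin pattern the patch establishes. The types and helpers (sketch_ctx, sketch_engine, sketch_dispatch, sketch_complete) are simplified stand-ins for illustration only, not the real i915/GVT structures or API:

/* Sketch only: simplified stand-ins for the real i915/GVT types. */
struct sketch_ctx {
	int pin_count;
};

struct sketch_engine {
	int (*context_pin)(struct sketch_engine *e, struct sketch_ctx *c);
	void (*context_unpin)(struct sketch_engine *e, struct sketch_ctx *c);
};

/* Dispatch path: take GVT's own pin before the request is allocated,
 * so the shadow context pages stay valid even after i915 drops its
 * request-lifetime pin.
 */
static int sketch_dispatch(struct sketch_engine *engine,
			   struct sketch_ctx *shadow_ctx)
{
	int ret = engine->context_pin(engine, shadow_ctx);
	if (ret)
		return ret;
	/* ... allocate the request, populate shadow_ctx, submit ... */
	return 0;
}

/* Completion path: the guest context is updated from the shadow
 * context first; only then is it safe to drop GVT's own pin.
 */
static void sketch_complete(struct sketch_engine *engine,
			    struct sketch_ctx *shadow_ctx)
{
	/* ... copy shadow_ctx contents back into the guest context ... */
	engine->context_unpin(engine, shadow_ctx);
}

On the error path, the sketch would mirror the patch below: if request allocation fails after the pin, the pin is dropped immediately rather than at completion.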
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 907e6bc794f6..39a83eb7aecc 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -175,6 +175,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
int ret;
@@ -188,6 +189,21 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&dev_priv->drm.struct_mutex);
+ /* Pin the shadow context in GVT even though it will also be pinned
+ * when i915 allocates the request. This is because GVT updates the
+ * guest context from the shadow context after the workload completes,
+ * and at that point i915 may already have unpinned the shadow context,
+ * leaving the shadow_ctx pages invalid. So GVT needs its own pin. After
+ * updating the guest context, GVT can unpin the shadow_ctx safely.
+ */
+ ret = engine->context_pin(engine, shadow_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ workload->status = ret;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+ }
+
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
@@ -231,6 +247,9 @@ out:
if (!IS_ERR_OR_NULL(rq))
i915_add_request_no_flush(rq);
+ else
+ engine->context_unpin(engine, shadow_ctx);
+
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
@@ -380,6 +399,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* For the workload w/o request, directly complete the workload.
*/
if (workload->req) {
+ struct drm_i915_private *dev_priv =
+ workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine =
+ dev_priv->engine[workload->ring_id];
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -392,6 +415,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ /* unpin the shadow_ctx now that the guest context update is done */
+ engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
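Taken together, the two hunks keep the pin balanced on every path: dispatch_workload() drops the pin in its out: path when no request was created (the else branch after i915_add_request_no_flush()), while on the success path the unpin is deferred to complete_current_workload(), after the guest context has been updated from shadow_ctx.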