author		Changbin Du <changbin.du@intel.com>	2017-05-04 05:52:38 +0300
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2017-06-08 08:59:15 +0300
commit		0e86cc9ccc3bf557348befaddf5cb613cf3c4458 (patch)
tree		d4ca241359ce82bafdcb9b65938da9961df6b2c9 /drivers/gpu/drm/i915/gvt/render.c
parent		5d0f5de16ef3d127469aa09dcdf07bec5174937f (diff)
download	linux-0e86cc9ccc3bf557348befaddf5cb613cf3c4458.tar.xz
drm/i915/gvt: implement per-vm mmio switching optimization
Commit ab9da627906a ("drm/i915: make context status notifier head be per engine") gives us a chance to inspect every single request, so we can eliminate unnecessary mmio switching for the same vGPU. We only need mmio switching between different VMs (including the host).

This patch introduces a new general API, intel_gvt_switch_mmio(), to replace the old intel_gvt_load/restore_render_mmio(). This function can be further optimized for vGPU-to-vGPU switching.

To support switching individual rings, we track the owner that occupies each ring. When another VM or the host requests a ring, we perform the mmio context switch; otherwise there is no need to switch the ring.

This optimization is very useful when only one guest has plenty of workloads and the host is mostly idle. In the best case, no mmio switching happens at all.

v2:
  o fix missing ring switch issue. (chuanxiao)
  o support individual ring switch.

Signed-off-by: Changbin Du <changbin.du@intel.com>
Reviewed-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
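The owner-tracking idea described above can be sketched minimally as follows (illustrative only: ring_owner[] and on_ring_request() are hypothetical names, not code from this patch; a NULL owner stands for the host):

	/* Hypothetical per-ring owner bookkeeping; NULL means the host. */
	static struct intel_vgpu *ring_owner[I915_NUM_ENGINES];

	static void on_ring_request(struct intel_vgpu *next, int ring_id)
	{
		struct intel_vgpu *pre = ring_owner[ring_id];

		/* Same owner as the last request: skip the mmio switch. */
		if (pre == next)
			return;

		intel_gvt_switch_mmio(pre, next, ring_id);
		ring_owner[ring_id] = next;
	}

The best case mentioned above falls out naturally from this structure: if one vGPU keeps submitting to a ring, pre == next holds on every request and no mmio switch ever runs.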
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/render.c')
 drivers/gpu/drm/i915/gvt/render.c | 35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index c6e7972ac21d..19d98c903672 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -260,7 +260,8 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
#define CTX_CONTEXT_CONTROL_VAL 0x03
-void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from host to a vgpu. */
+static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
@@ -312,7 +313,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
handle_tlb_pending_event(vgpu, ring_id);
}
-void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from vgpu to host. */
+static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
@@ -348,3 +350,32 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
mmio->value, v);
}
}
+
+/**
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
+ * @pre: the last vGPU that own the engine
+ * @next: the vGPU to switch to
+ * @ring_id: specify the engine
+ *
+ * If pre is NULL, it indicates that the host owns the engine. If next is
+ * NULL, it indicates that we are switching to the host workload.
+ */
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+ struct intel_vgpu *next, int ring_id)
+{
+ if (WARN_ON(!pre && !next))
+ return;
+
+ gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ pre ? "vGPU" : "host", next ? "vGPU" : "host");
+
+ /*
+ * TODO: Optimize for vGPU to vGPU switch by merging
+ * switch_mmio_to_host() and switch_mmio_to_vgpu().
+ */
+ if (pre)
+ switch_mmio_to_host(pre, ring_id);
+
+ if (next)
+ switch_mmio_to_vgpu(next, ring_id);
+}
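The NULL conventions in the comment above make the three switch directions explicit. Example calls (vgpu_a and vgpu_b are hypothetical vGPU pointers; RCS is the render engine id):

	intel_gvt_switch_mmio(NULL, vgpu_a, RCS);	/* host   -> vGPU A */
	intel_gvt_switch_mmio(vgpu_a, vgpu_b, RCS);	/* vGPU A -> vGPU B */
	intel_gvt_switch_mmio(vgpu_b, NULL, RCS);	/* vGPU B -> host   */

Note that a vGPU-to-vGPU switch currently passes through the host values twice (restore host, then load next), which is exactly the cost the TODO above proposes to eliminate by merging the two helpers.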