author     Xu Han <xu.han@intel.com>              2017-03-29 05:13:59 +0300
committer  Zhenyu Wang <zhenyuw@linux.intel.com>  2017-03-29 10:28:51 +0300
commit     e3476c0021c31b64070c40f02bfb7faab55591b4 (patch)
tree       43155b1412ece81d1f7aa2eaaeec30d6a1200119 /drivers/gpu/drm/i915/gvt/render.c
parent     6f696d13558e7c3ea6835a4215629ffc16979bf6 (diff)
download   linux-e3476c0021c31b64070c40f02bfb7faab55591b4.tar.xz
drm/i915/gvt: Add KBL dispatch logic in each function.
Extend function dispatch logic to support KBL platform.

Signed-off-by: Xu Han <xu.han@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/render.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c  |  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index f0b3de352aae..e24e57afc45e 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -171,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 */
 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 				    FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 		fw |= FORCEWAKE_RENDER;
 
 	intel_uncore_forcewake_get(dev_priv, fw);
@@ -204,7 +204,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!IS_SKYLAKE(dev_priv))
+	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 		return;
 
 	offset.reg = regs[ring_id];
@@ -242,7 +242,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!IS_SKYLAKE(dev_priv))
+	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 		return;
 
 	offset.reg = regs[ring_id];
@@ -277,7 +277,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
-	if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+		|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
 		mmio = gen9_render_mmio_list;
 		array_size = ARRAY_SIZE(gen9_render_mmio_list);
 		load_mocs(vgpu, ring_id);
@@ -324,7 +325,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 	u32 v;
 	int i, array_size;
 
-	if (IS_SKYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		mmio = gen9_render_mmio_list;
 		array_size = ARRAY_SIZE(gen9_render_mmio_list);
 		restore_mocs(vgpu, ring_id);
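
The patch is a pure dispatch broadening: every site that previously took the gen9 MMIO/MOCS path only for Skylake now also takes it for Kaby Lake, with no behavioural change for other platforms. As a minimal standalone sketch of that pattern (not kernel code: the gvt_platform enum, is_skylake()/is_kabylake() predicates and pick_mmio_list() helper are hypothetical stand-ins for IS_SKYLAKE()/IS_KABYLAKE() and the gen8/gen9 render MMIO lists):

/*
 * Standalone sketch only; all names below are hypothetical stand-ins
 * for the real i915/GVT helpers referenced in the diff above.
 */
#include <stdio.h>

enum gvt_platform { PLAT_BROADWELL, PLAT_SKYLAKE, PLAT_KABYLAKE };

static int is_skylake(enum gvt_platform p)  { return p == PLAT_SKYLAKE; }
static int is_kabylake(enum gvt_platform p) { return p == PLAT_KABYLAKE; }

/* Mirrors the dispatch shape in intel_gvt_load_render_mmio(): SKL and
 * KBL share the gen9 render MMIO list, older platforms fall back to gen8. */
static const char *pick_mmio_list(enum gvt_platform p)
{
	if (is_skylake(p) || is_kabylake(p))
		return "gen9_render_mmio_list";
	return "gen8_render_mmio_list";
}

int main(void)
{
	printf("SKL -> %s\n", pick_mmio_list(PLAT_SKYLAKE));
	printf("KBL -> %s\n", pick_mmio_list(PLAT_KABYLAKE));
	printf("BDW -> %s\n", pick_mmio_list(PLAT_BROADWELL));
	return 0;
}

The same "or in one more platform" shape repeats in handle_tlb_pending_event(), load_mocs(), restore_mocs() and both the load and restore render-MMIO entry points, which is why the patch touches five call sites.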