Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/aperture_gm.c | 7
-rw-r--r-- | drivers/gpu/drm/i915/gvt/cmd_parser.c | 33
-rw-r--r-- | drivers/gpu/drm/i915/gvt/debugfs.c | 5
-rw-r--r-- | drivers/gpu/drm/i915/gvt/display.c | 90
-rw-r--r-- | drivers/gpu/drm/i915/gvt/display.h | 42
-rw-r--r-- | drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r-- | drivers/gpu/drm/i915/gvt/edid.c | 14
-rw-r--r-- | drivers/gpu/drm/i915/gvt/edid.h | 8
-rw-r--r-- | drivers/gpu/drm/i915/gvt/fb_decoder.c | 138
-rw-r--r-- | drivers/gpu/drm/i915/gvt/fb_decoder.h | 2
-rw-r--r-- | drivers/gpu/drm/i915/gvt/gtt.c | 76
-rw-r--r-- | drivers/gpu/drm/i915/gvt/gvt.h | 23
-rw-r--r-- | drivers/gpu/drm/i915/gvt/handlers.c | 139
-rw-r--r-- | drivers/gpu/drm/i915/gvt/kvmgt.c | 6
-rw-r--r-- | drivers/gpu/drm/i915/gvt/mmio.h | 3
-rw-r--r-- | drivers/gpu/drm/i915/gvt/mmio_context.c | 6
-rw-r--r-- | drivers/gpu/drm/i915/gvt/opregion.c | 9
-rw-r--r-- | drivers/gpu/drm/i915/gvt/page_track.c | 2
-rw-r--r-- | drivers/gpu/drm/i915/gvt/sched_policy.c | 8
-rw-r--r-- | drivers/gpu/drm/i915/gvt/scheduler.c | 13
-rw-r--r-- | drivers/gpu/drm/i915/gvt/vgpu.c | 6
21 files changed, 209 insertions, 423 deletions
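A note on the change that recurs throughout this series: mmio_hw_access_pre() now returns the intel_wakeref_t cookie obtained from intel_runtime_pm_get(), and mmio_hw_access_post() hands it back to intel_runtime_pm_put(), replacing the untracked intel_runtime_pm_put_unchecked(). Every caller (aperture_gm.c, debugfs.c, gtt.c, handlers.c) is converted to thread the cookie through. A minimal sketch of the converted caller pattern, using the helpers as redefined in the gvt.h hunk below:

	intel_wakeref_t wakeref;

	wakeref = mmio_hw_access_pre(gvt->gt);
	/* raw MMIO access while the tracked wakeref keeps the device awake */
	vgpu_vreg(vgpu, offset) = intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
	mmio_hw_access_post(gvt->gt, wakeref);

Carrying the cookie lets the runtime-PM debug machinery pair every get with its put, which the _unchecked variant could not.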
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index eedd1865bb98..62d14f82256f 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) unsigned int flags; u64 start, end, size; struct drm_mm_node *node; + intel_wakeref_t wakeref; int ret; if (high_gm) { @@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) } mutex_lock(&gt->ggtt->vm.mutex); - mmio_hw_access_pre(gt); + wakeref = mmio_hw_access_pre(gt); ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); - mmio_hw_access_post(gt); + mmio_hw_access_post(gt, wakeref); mutex_unlock(&gt->ggtt->vm.mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", @@ -226,7 +227,7 @@ out_free_fence: vgpu->fence.regs[i] = NULL; } mutex_unlock(&gvt->gt->ggtt->vm.mutex); - intel_runtime_pm_put_unchecked(uncore->rpm); + intel_runtime_pm_put(uncore->rpm, wakeref); return -ENOSPC; } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2f4c9c66b40b..f25ee2953baf 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -50,7 +50,6 @@ #include "trace.h" #include "display/i9xx_plane_regs.h" -#include "display/intel_display.h" #include "display/intel_sprite_regs.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" @@ -1287,6 +1286,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->engine->i915; + struct intel_display *display = &dev_priv->display; struct plane_code_mapping gen8_plane_code[] = { [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE}, [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE}, @@ -1315,9 +1315,9 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s, info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1); if (info->plane == PLANE_A) { - info->ctrl_reg = DSPCNTR(dev_priv, info->pipe); - info->stride_reg = DSPSTRIDE(dev_priv, info->pipe); - info->surf_reg = DSPSURF(dev_priv, info->pipe); + info->ctrl_reg = DSPCNTR(display, info->pipe); + info->stride_reg = DSPSTRIDE(display, info->pipe); + info->surf_reg = DSPSURF(display, info->pipe); } else if (info->plane == PLANE_B) { info->ctrl_reg = SPRCTL(info->pipe); info->stride_reg = SPRSTRIDE(info->pipe); @@ -1333,6 +1333,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->engine->i915; + struct intel_display *display = &dev_priv->display; struct intel_vgpu *vgpu = s->vgpu; u32 dword0 = cmd_val(s, 0); u32 dword1 = cmd_val(s, 1); @@ -1381,9 +1382,9 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1); - info->ctrl_reg = DSPCNTR(dev_priv, info->pipe); - info->stride_reg = DSPSTRIDE(dev_priv, info->pipe); - info->surf_reg = DSPSURF(dev_priv, info->pipe); + info->ctrl_reg = DSPCNTR(display, info->pipe); + info->stride_reg = DSPSTRIDE(display, info->pipe); + info->surf_reg = DSPSURF(display, info->pipe); return 0; } @@ -1420,6 +1421,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip( struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->engine->i915; + struct intel_display *display =
&dev_priv->display; struct intel_vgpu *vgpu = s->vgpu; set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), @@ -1437,7 +1439,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip( } if (info->plane == PLANE_PRIMARY) - vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, info->pipe))++; + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, info->pipe))++; if (info->async_flip) intel_vgpu_trigger_virtual_event(vgpu, info->event); @@ -1904,7 +1906,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; unsigned long start_offset = 0; - /* get the start gm address of the batch buffer */ + /* Get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); if (gma == INTEL_GVT_INVALID_ADDR) return -EFAULT; @@ -1919,15 +1921,16 @@ static int perform_bb_shadow(struct parser_exec_state *s) bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; - /* the start_offset stores the batch buffer's start gma's - * offset relative to page boundary. so for non-privileged batch + /* + * The start_offset stores the batch buffer's start gma's + * offset relative to page boundary. So for non-privileged batch * buffer, the shadowed gem object holds exactly the same page - * layout as original gem object. This is for the convience of + * layout as original gem object. This is for the convenience of * replacing the whole non-privilged batch buffer page to this - * shadowed one in PPGTT at the same gma address. (this replacing + * shadowed one in PPGTT at the same gma address. (This replacing * action is not implemented yet now, but may be necessary in * future). - * for prileged batch buffer, we just change start gma address to + * For prileged batch buffer, we just change start gma address to * that of shadowed page. */ if (bb->ppgtt) @@ -1974,7 +1977,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) /* * ip_va saves the virtual address of the shadow batch buffer, while * ip_gma saves the graphics address of the original batch buffer. - * As the shadow batch buffer is just a copy from the originial one, + * As the shadow batch buffer is just a copy from the original one, * it should be right to use shadow batch buffer'va and original batch * buffer's gma in pair. After all, we don't want to pin the shadow * buffer here (too early). diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index baccbf1761b7..673534f061ef 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused) .diff = 0, }; struct diff_mmio *node, *next; + intel_wakeref_t wakeref; INIT_LIST_HEAD(&param.diff_mmio_list); mutex_lock(&gvt->lock); spin_lock_bh(&gvt->scheduler.mmio_context_lock); - mmio_hw_access_pre(gvt->gt); + wakeref = mmio_hw_access_pre(gvt->gt); /* Recognize all the diff mmios to list.
*/ intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); spin_unlock_bh(&gvt->scheduler.mmio_context_lock); mutex_unlock(&gvt->lock); diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index c66d6d3177c8..1e1af5e545a4 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -32,12 +32,15 @@ * */ +#include <drm/display/drm_dp.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" #include "display/bxt_dpio_phy_regs.h" #include "display/i9xx_plane_regs.h" +#include "display/intel_crt_regs.h" #include "display/intel_cursor_regs.h" #include "display/intel_display.h" #include "display/intel_dpio_phy.h" @@ -66,8 +69,9 @@ static int get_edp_pipe(struct intel_vgpu *vgpu) static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; - if (!(vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_EDP)) & TRANSCONF_ENABLE)) + if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE)) return 0; if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE)) @@ -78,12 +82,13 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; if (drm_WARN_ON(&dev_priv->drm, pipe < PIPE_A || pipe >= I915_MAX_PIPES)) return -EINVAL; - if (vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) & TRANSCONF_ENABLE) + if (vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) & TRANSCONF_ENABLE) return 1; if (edp_pipe_is_enabled(vgpu) && @@ -92,7 +97,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) return 0; } -static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { +static const unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { { /* EDID with 1024x768 as its resolution */ /*Header*/ @@ -178,6 +183,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; int pipe; if (IS_BROXTON(dev_priv)) { @@ -190,21 +196,21 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); - for_each_pipe(dev_priv, pipe) { - vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) &= + for_each_pipe(display, pipe) { + vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) &= ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE); - vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE; + vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; - vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK; - vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE; + vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK; + vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE; } for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) { - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, trans)) &= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, trans)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE); } - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &= + vgpu_vreg_t(vgpu,
TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); @@ -252,8 +258,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * TRANSCODER_A can be enabled. PORT_x depends on the input of * setup_virtual_dp_monitor. */ - vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE; - vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE; /* * Golden M/N are calculated based on: @@ -261,11 +267,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. */ - vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64); - vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e; - vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000; - vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e; - vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64); + vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000; /* Enable per-DDI/PORT vreg */ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { @@ -288,7 +294,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, - TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP)) |= + TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= @@ -318,7 +324,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, - TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |= + TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); @@ -349,7 +355,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, - TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |= + TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); @@ -398,11 +404,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. 
*/ - vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64); - vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e; - vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000; - vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e; - vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64); + vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { @@ -413,10 +419,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); @@ -439,10 +445,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); @@ -465,10 +471,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); - vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); @@ -506,14 +512,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; /* Disable Primary/Sprite/Cursor plane */ - for_each_pipe(dev_priv, pipe) { - vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE; + for_each_pipe(display, pipe) { + vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; - vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK; - vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE; + vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK; + vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE; } - vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE; + vgpu_vreg_t(vgpu, 
TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE; } static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) @@ -568,15 +574,14 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); port->dpcd->data_valid = true; - port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + port->dpcd->data[DP_SINK_COUNT] = 0x1; port->type = type; port->id = resolution; port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC; vgpu->display.port_num = port_num; /* Init hrtimer based on default refresh rate */ - hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - vblank_timer->timer.function = vblank_timer_fn; + hrtimer_setup(&vblank_timer->timer, vblank_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); vblank_timer->vrefresh_k = port->vrefresh_k; vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k); @@ -629,6 +634,7 @@ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon) static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; struct intel_vgpu_irq *irq = &vgpu->irq; int vblank_event[] = { [PIPE_A] = PIPE_A_VBLANK, @@ -650,17 +656,19 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) } if (pipe_is_enabled(vgpu, pipe)) { - vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(dev_priv, pipe))++; + vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(display, pipe))++; intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]); } } void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + struct intel_display *display = &i915->display; int pipe; mutex_lock(&vgpu->vgpu_lock); - for_each_pipe(vgpu->gvt->gt->i915, pipe) + for_each_pipe(display, pipe) emulate_vblank_on_pipe(vgpu, pipe); mutex_unlock(&vgpu->vgpu_lock); } diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index f5616f99ef2f..8090bc53c7e1 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -59,52 +59,10 @@ struct intel_vgpu; #define INTEL_GVT_MAX_UEVENT_VARS 3 -/* DPCD start */ -#define DPCD_SIZE 0x700 - -/* DPCD */ -#define DP_SET_POWER 0x600 -#define DP_SET_POWER_D0 0x1 -#define AUX_NATIVE_WRITE 0x8 -#define AUX_NATIVE_READ 0x9 - -#define AUX_NATIVE_REPLY_MASK (0x3 << 4) -#define AUX_NATIVE_REPLY_ACK (0x0 << 4) #define AUX_NATIVE_REPLY_NAK (0x1 << 4) -#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) #define AUX_BURST_SIZE 20 -/* DPCD addresses */ -#define DPCD_REV 0x000 -#define DPCD_MAX_LINK_RATE 0x001 -#define DPCD_MAX_LANE_COUNT 0x002 - -#define DPCD_TRAINING_PATTERN_SET 0x102 -#define DPCD_SINK_COUNT 0x200 -#define DPCD_LANE0_1_STATUS 0x202 -#define DPCD_LANE2_3_STATUS 0x203 -#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204 -#define DPCD_SINK_STATUS 0x205 - -/* link training */ -#define DPCD_TRAINING_PATTERN_SET_MASK 0x03 -#define DPCD_LINK_TRAINING_DISABLED 0x00 -#define DPCD_TRAINING_PATTERN_1 0x01 -#define DPCD_TRAINING_PATTERN_2 0x02 - -#define DPCD_CP_READY_MASK (1 << 6) - -/* lane status */ -#define DPCD_LANES_CR_DONE 0x11 -#define DPCD_LANES_EQ_DONE 0x22 -#define DPCD_SYMBOL_LOCKED 0x44 - -#define DPCD_INTERLANE_ALIGN_DONE 0x01 - -#define DPCD_SINK_IN_SYNC 0x03 -/* DPCD end */ - #define SBI_RESPONSE_MASK 0x3 #define SBI_RESPONSE_SHIFT 0x1 #define SBI_STAT_MASK 0x1 diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c 
b/drivers/gpu/drm/i915/gvt/dmabuf.c index 9efc3ca0ce82..4f599af766b0 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -436,7 +436,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) dmabuf_obj_get(dmabuf_obj); } ret = 0; - gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n", + gvt_dbg_dpy("vgpu%d: reuse dmabuf_obj ref %d, id %d\n", vgpu->id, kref_read(&dmabuf_obj->kref), gfx_plane_info->dmabuf_id); mutex_unlock(&vgpu->dmabuf_lock); diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index c022dc736045..89147d33168c 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -32,6 +32,8 @@ * */ +#include <drm/display/drm_dp.h> + #include "display/intel_dp_aux_regs.h" #include "display/intel_gmbus_regs.h" #include "gvt.h" @@ -296,7 +298,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, int byte_count = byte_left; u32 reg_data = 0; - /* Data can only be recevied if previous settings correct */ + /* Data can only be received if previous settings correct */ if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) { if (byte_left <= 0) { memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); @@ -504,13 +506,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } /* Always set the wanted value for vms. */ - ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1); + ret_msg_size = (((op & 0x1) == DP_AUX_I2C_READ) ? 2 : 1); vgpu_vreg(vgpu, offset) = DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_MESSAGE_SIZE(ret_msg_size); if (msg_length == 3) { - if (!(op & GVT_AUX_I2C_MOT)) { + if (!(op & DP_AUX_I2C_MOT)) { /* stop */ intel_vgpu_init_i2c_edid(vgpu); } else { @@ -530,7 +532,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, i2c_edid->edid_available = true; } } - } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) { + } else if ((op & 0x1) == DP_AUX_I2C_WRITE) { /* TODO * We only support EDID reading from I2C_over_AUX. And * we do not expect the index mode to be used. Right now @@ -538,7 +540,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * support the gfx driver to do EDID access. 
*/ } else { - if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ)) + if (drm_WARN_ON(&i915->drm, (op & 0x1) != DP_AUX_I2C_READ)) return; if (drm_WARN_ON(&i915->drm, msg_length != 4)) return; @@ -553,7 +555,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * ACK of I2C_WRITE * returned byte if it is READ */ - aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24; + aux_data_for_write |= DP_AUX_I2C_REPLY_ACK << 24; vgpu_vreg(vgpu, offset + 4) = aux_data_for_write; } diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index c3b5a55aecb3..13fd06590929 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -42,14 +42,6 @@ struct intel_vgpu; #define EDID_SIZE 128 #define EDID_ADDR 0x50 /* Linux hvm EDID addr */ -#define GVT_AUX_NATIVE_WRITE 0x8 -#define GVT_AUX_NATIVE_READ 0x9 -#define GVT_AUX_I2C_WRITE 0x0 -#define GVT_AUX_I2C_READ 0x1 -#define GVT_AUX_I2C_STATUS 0x2 -#define GVT_AUX_I2C_MOT 0x4 -#define GVT_AUX_I2C_REPLY_ACK 0x0 - struct intel_vgpu_edid_data { bool data_valid; unsigned char edid_block[EDID_SIZE]; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index c454e25b2b0f..f9f7ef131371 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -154,8 +154,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, u32 tiled, int stride_mask, int bpp) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; - u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(dev_priv, pipe)) & stride_mask; + u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(display, pipe)) & stride_mask; u32 stride = stride_reg; if (GRAPHICS_VER(dev_priv) >= 9) { @@ -210,6 +211,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, struct intel_vgpu_primary_plane_format *plane) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; u32 val, fmt; int pipe; @@ -217,7 +219,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, if (pipe >= I915_MAX_PIPES) return -ENODEV; - val = vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)); + val = vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)); plane->enabled = !!(val & DISP_ENABLE); if (!plane->enabled) return -ENODEV; @@ -251,7 +253,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; - plane->base = vgpu_vreg_t(vgpu, DSPSURF(dev_priv, pipe)) & I915_GTT_PAGE_MASK; + plane->base = vgpu_vreg_t(vgpu, DSPSURF(display, pipe)) & I915_GTT_PAGE_MASK; if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; @@ -267,14 +269,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, (_PRI_PLANE_STRIDE_MASK >> 6) : _PRI_PLANE_STRIDE_MASK, plane->bpp); - plane->width = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) & _PIPE_H_SRCSZ_MASK) >> + plane->width = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) & _PIPE_H_SRCSZ_MASK) >> _PIPE_H_SRCSZ_SHIFT; plane->width += 1; - plane->height = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) & + plane->height = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) & _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT; plane->height += 1; /* raw height is one minus the real value */ - val = vgpu_vreg_t(vgpu, DSPTILEOFF(dev_priv, pipe)); + val = vgpu_vreg_t(vgpu, DSPTILEOFF(display, pipe)); plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >> _PRI_PLANE_X_OFF_SHIFT; plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >> @@ -340,6 +342,7 @@ int 
intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, struct intel_vgpu_cursor_plane_format *plane) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; u32 val, mode, index; u32 alpha_plane, alpha_force; int pipe; @@ -348,7 +351,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, if (pipe >= I915_MAX_PIPES) return -ENODEV; - val = vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)); + val = vgpu_vreg_t(vgpu, CURCNTR(display, pipe)); mode = val & MCURSOR_MODE_MASK; plane->enabled = (mode != MCURSOR_MODE_DISABLE); if (!plane->enabled) @@ -374,7 +377,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n", alpha_plane, alpha_force); - plane->base = vgpu_vreg_t(vgpu, CURBASE(dev_priv, pipe)) & I915_GTT_PAGE_MASK; + plane->base = vgpu_vreg_t(vgpu, CURBASE(display, pipe)) & I915_GTT_PAGE_MASK; if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; @@ -385,7 +388,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, return -EINVAL; } - val = vgpu_vreg_t(vgpu, CURPOS(dev_priv, pipe)); + val = vgpu_vreg_t(vgpu, CURPOS(display, pipe)); plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT; plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT; plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT; @@ -395,120 +398,3 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)); return 0; } - -#define SPRITE_FORMAT_NUM (1 << 3) - -static const struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = { - [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"}, - [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"}, - [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"}, - [0x4] = {DRM_FORMAT_AYUV, 32, - "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"}, -}; - -/** - * intel_vgpu_decode_sprite_plane - Decode sprite plane - * @vgpu: input vgpu - * @plane: sprite plane to save decoded info - * This function is called for decoding plane - * - * Returns: - * 0 on success, non-zero if failed. 
- */ -int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, - struct intel_vgpu_sprite_plane_format *plane) -{ - u32 val, fmt; - u32 color_order, yuv_order; - int drm_format; - int pipe; - - pipe = get_active_pipe(vgpu); - if (pipe >= I915_MAX_PIPES) - return -ENODEV; - - val = vgpu_vreg_t(vgpu, SPRCTL(pipe)); - plane->enabled = !!(val & SPRITE_ENABLE); - if (!plane->enabled) - return -ENODEV; - - plane->tiled = !!(val & SPRITE_TILED); - color_order = !!(val & SPRITE_RGB_ORDER_RGBX); - yuv_order = (val & SPRITE_YUV_ORDER_MASK) >> - _SPRITE_YUV_ORDER_SHIFT; - - fmt = (val & SPRITE_FORMAT_MASK) >> _SPRITE_FMT_SHIFT; - if (!sprite_pixel_formats[fmt].bpp) { - gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt); - return -EINVAL; - } - plane->hw_format = fmt; - plane->bpp = sprite_pixel_formats[fmt].bpp; - drm_format = sprite_pixel_formats[fmt].drm_format; - - /* Order of RGB values in an RGBxxx buffer may be ordered RGB or - * BGR depending on the state of the color_order field - */ - if (!color_order) { - if (drm_format == DRM_FORMAT_XRGB2101010) - drm_format = DRM_FORMAT_XBGR2101010; - else if (drm_format == DRM_FORMAT_XRGB8888) - drm_format = DRM_FORMAT_XBGR8888; - } - - if (drm_format == DRM_FORMAT_YUV422) { - switch (yuv_order) { - case 0: - drm_format = DRM_FORMAT_YUYV; - break; - case 1: - drm_format = DRM_FORMAT_UYVY; - break; - case 2: - drm_format = DRM_FORMAT_YVYU; - break; - case 3: - drm_format = DRM_FORMAT_VYUY; - break; - default: - /* yuv_order has only 2 bits */ - break; - } - } - - plane->drm_format = drm_format; - - plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!vgpu_gmadr_is_valid(vgpu, plane->base)) - return -EINVAL; - - plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); - if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", - plane->base); - return -EINVAL; - } - - plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) & - _SPRITE_STRIDE_MASK; - - val = vgpu_vreg_t(vgpu, SPRSIZE(pipe)); - plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >> - _SPRITE_SIZE_HEIGHT_SHIFT; - plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >> - _SPRITE_SIZE_WIDTH_SHIFT; - plane->height += 1; /* raw height is one minus the real value */ - plane->width += 1; /* raw width is one minus the real value */ - - val = vgpu_vreg_t(vgpu, SPRPOS(pipe)); - plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT; - plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT; - - val = vgpu_vreg_t(vgpu, SPROFFSET(pipe)); - plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >> - _SPRITE_OFFSET_START_X_SHIFT; - plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >> - _SPRITE_OFFSET_START_Y_SHIFT; - - return 0; -} diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index fa6503900c84..436d43c0087b 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -156,7 +156,5 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, struct intel_vgpu_primary_plane_format *plane); int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, struct intel_vgpu_cursor_plane_format *plane); -int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, - struct intel_vgpu_sprite_plane_format *plane); #endif diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 58cca4906f41..ae9b0ded3651 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -71,72 +71,6 
@@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) return false; } -/* translate a guest gmadr to host gmadr */ -int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) -{ - struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - - if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr), - "invalid guest gmadr %llx\n", g_addr)) - return -EACCES; - - if (vgpu_gmadr_is_aperture(vgpu, g_addr)) - *h_addr = vgpu_aperture_gmadr_base(vgpu) - + (g_addr - vgpu_aperture_offset(vgpu)); - else - *h_addr = vgpu_hidden_gmadr_base(vgpu) - + (g_addr - vgpu_hidden_offset(vgpu)); - return 0; -} - -/* translate a host gmadr to guest gmadr */ -int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr) -{ - struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - - if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr), - "invalid host gmadr %llx\n", h_addr)) - return -EACCES; - - if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr)) - *g_addr = vgpu_aperture_gmadr_base(vgpu) - + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt)); - else - *g_addr = vgpu_hidden_gmadr_base(vgpu) - + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt)); - return 0; -} - -int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, - unsigned long *h_index) -{ - u64 h_addr; - int ret; - - ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT, - &h_addr); - if (ret) - return ret; - - *h_index = h_addr >> I915_GTT_PAGE_SHIFT; - return 0; -} - -int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, - unsigned long *g_index) -{ - u64 g_addr; - int ret; - - ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT, - &g_addr); - if (ret) - return ret; - - *g_index = g_addr >> I915_GTT_PAGE_SHIFT; - return 0; -} - #define gtt_type_is_entry(type) \ (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \ && type != GTT_TYPE_PPGTT_PTE_ENTRY \ @@ -286,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index) static void ggtt_invalidate(struct intel_gt *gt) { - mmio_hw_access_pre(gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gt); intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); - mmio_hw_access_post(gt); + mmio_hw_access_post(gt, wakeref); } static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte) @@ -1190,7 +1126,7 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, ppgtt_set_shadow_entry(spt, se, index); return 0; err: - /* Cancel the existing addess mappings of DMA addr. */ + /* Cancel the existing address mappings of DMA addr. */ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) { gvt_vdbg_mm("invalidate 4K entry\n"); ppgtt_invalidate_pte(sub_spt, &sub_se); @@ -1259,7 +1195,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, gvt_vdbg_mm("shadow 64K gtt entry\n"); /* * The layout of 64K page is special, the page size is - * controlled by uper PDE. To be simple, we always split + * controlled by upper PDE. To be simple, we always split * 64K page to smaller 4K pages in shadow PT. 
*/ return split_64KB_gtt_entry(vgpu, spt, index, &se); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 2c95aeef4e41..1d10c16e6465 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -452,8 +452,10 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu); void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value); -/* Macros for easily accessing vGPU virtual/shadow register. - Explicitly seperate use for typed MMIO reg or real offset.*/ +/* + * Macros for easily accessing vGPU virtual/shadow register. + * Explicitly separate use for typed MMIO reg or real offset. + */ #define vgpu_vreg_t(vgpu, reg) \ (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg))) #define vgpu_vreg(vgpu, offset) \ @@ -531,12 +533,6 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num); gvt_gmadr_is_hidden(gvt, gmadr)) bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size); -int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr); -int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr); -int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, - unsigned long *h_index); -int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, - unsigned long *g_index); void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, bool primary); @@ -574,14 +570,15 @@ enum { GVT_FAILSAFE_GUEST_ERR, }; -static inline void mmio_hw_access_pre(struct intel_gt *gt) +static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt) { - intel_runtime_pm_get(gt->uncore->rpm); + return intel_runtime_pm_get(gt->uncore->rpm); } -static inline void mmio_hw_access_post(struct intel_gt *gt) +static inline void mmio_hw_access_post(struct intel_gt *gt, + intel_wakeref_t wakeref) { - intel_runtime_pm_put_unchecked(gt->uncore->rpm); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); } /** @@ -702,7 +699,7 @@ static inline void intel_gvt_mmio_set_cmd_write_patch( * @offset: register offset * * Returns: - * True if GPU commmand write to an MMIO should be patched + * True if GPU command write to an MMIO should be patched. 
*/ static inline bool intel_gvt_mmio_is_cmd_write_patch( struct intel_gvt *gvt, unsigned int offset) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 0f09344d3c20..1344e6d20a34 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -36,6 +36,8 @@ */ +#include <drm/display/drm_dp.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -43,6 +45,7 @@ #include "intel_mchbar_regs.h" #include "display/bxt_dpio_phy_regs.h" #include "display/i9xx_plane_regs.h" +#include "display/intel_crt_regs.h" #include "display/intel_cursor_regs.h" #include "display/intel_display_types.h" #include "display/intel_dmc_regs.h" @@ -53,6 +56,7 @@ #include "display/intel_pps_regs.h" #include "display/intel_psr_regs.h" #include "display/intel_sprite_regs.h" +#include "display/intel_vga_regs.h" #include "display/skl_universal_plane_regs.h" #include "display/skl_watermark_regs.h" #include "display/vlv_dsi_pll_regs.h" @@ -261,6 +265,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, { struct intel_gvt *gvt = vgpu->gvt; unsigned int fence_num = offset_to_fence_num(off); + intel_wakeref_t wakeref; int ret; ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes); @@ -268,10 +273,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, return ret; write_vreg(vgpu, off, p_data, bytes); - mmio_hw_access_pre(gvt->gt); + wakeref = mmio_hw_access_pre(gvt->gt); intel_vgpu_write_fence(vgpu, fence_num, vgpu_vreg64(vgpu, fence_num_to_offset(fence_num))); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); return 0; } @@ -510,7 +515,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) switch (wrpll_ctl & WRPLL_REF_MASK) { case WRPLL_REF_PCH_SSC: - refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc; + refclk = 135000; break; case WRPLL_REF_LCPLL: refclk = 2700000; @@ -541,7 +546,7 @@ out: static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) { u32 dp_br = 0; - int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; + int refclk = 100000; enum dpio_phy phy = DPIO_PHY0; enum dpio_channel ch = DPIO_CH0; struct dpll clock = {}; @@ -653,11 +658,12 @@ static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; enum port port; u32 dp_br, link_m, link_n, htotal, vtotal; /* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */ - port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) & + port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; if (port != PORT_B && port != PORT_D) { gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port)); @@ -673,23 +679,23 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) dp_br = skl_vgpu_get_dp_bitrate(vgpu, port); /* Get DP link symbol clock M/N */ - link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)); - link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)); + link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)); + link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)); /* Get H/V total from transcoder timing */ - htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); - vtotal = (vgpu_vreg_t(vgpu, 
TRANS_VTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); + htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(display, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); + vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(display, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); if (dp_br && link_n && htotal && vtotal) { u64 pixel_clk = 0; u32 new_rate = 0; u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k); - /* Calcuate pixel clock by (ls_clk * M / N) */ + /* Calculate pixel clock by (ls_clk * M / N) */ pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n); pixel_clk *= MSEC_PER_SEC; - /* Calcuate refresh rate by (pixel_clk / (h_total * v_total)) */ + /* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */ new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1)); if (*old_rate != new_rate) @@ -1009,22 +1015,23 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu, return 0; } -#define DSPSURF_TO_PIPE(dev_priv, offset) \ - calc_index(offset, DSPSURF(dev_priv, PIPE_A), DSPSURF(dev_priv, PIPE_B), DSPSURF(dev_priv, PIPE_C)) +#define DSPSURF_TO_PIPE(display, offset) \ + calc_index(offset, DSPSURF(display, PIPE_A), DSPSURF(display, PIPE_B), DSPSURF(display, PIPE_C)) static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; - u32 pipe = DSPSURF_TO_PIPE(dev_priv, offset); + struct intel_display *display = &dev_priv->display; + u32 pipe = DSPSURF_TO_PIPE(display, offset); int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY); write_vreg(vgpu, offset, p_data, bytes); - vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset); + vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset); - vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++; + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++; - if (vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) & PLANE_CTL_ASYNC_FLIP) + if (vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) & PLANE_CTL_ASYNC_FLIP) intel_vgpu_trigger_virtual_event(vgpu, event); else set_bit(event, vgpu->irq.flip_done_event[pipe]); @@ -1057,14 +1064,15 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu, unsigned int bytes) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_display *display = &dev_priv->display; enum pipe pipe = REG_50080_TO_PIPE(offset); enum plane_id plane = REG_50080_TO_PLANE(offset); int event = SKL_FLIP_EVENT(pipe, plane); write_vreg(vgpu, offset, p_data, bytes); if (plane == PLANE_PRIMARY) { - vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset); - vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++; + vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset); + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++; } else { vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); } @@ -1129,29 +1137,36 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value, static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd, u8 t) { - if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) { + if ((t & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_1) { /* training pattern 1 for CR */ /* set LANE0_CR_DONE, LANE1_CR_DONE */ - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CR_DONE | + DP_LANE_CR_DONE << 4; /* set LANE2_CR_DONE, LANE3_CR_DONE */ - dpcd->data[DPCD_LANE2_3_STATUS] |= 
DPCD_LANES_CR_DONE; - } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == - DPCD_TRAINING_PATTERN_2) { + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CR_DONE | + DP_LANE_CR_DONE << 4; + } else if ((t & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_2) { /* training pattern 2 for EQ */ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */ - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE; - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CHANNEL_EQ_DONE | + DP_LANE_CHANNEL_EQ_DONE << 4; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_SYMBOL_LOCKED | + DP_LANE_SYMBOL_LOCKED << 4; /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */ - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE; - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED; + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CHANNEL_EQ_DONE | + DP_LANE_CHANNEL_EQ_DONE << 4; + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_SYMBOL_LOCKED | + DP_LANE_SYMBOL_LOCKED << 4; /* set INTERLANE_ALIGN_DONE */ - dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |= - DPCD_INTERLANE_ALIGN_DONE; - } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == - DPCD_LINK_TRAINING_DISABLED) { + dpcd->data[DP_LANE_ALIGN_STATUS_UPDATED] |= + DP_INTERLANE_ALIGN_DONE; + } else if ((t & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_DISABLE) { /* finish link training */ /* set sink status as synchronized */ - dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC; + dpcd->data[DP_SINK_STATUS] = DP_RECEIVE_PORT_0_STATUS | + DP_RECEIVE_PORT_1_STATUS; } } @@ -1206,7 +1221,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, len = msg & 0xff; op = ctrl >> 4; - if (op == GVT_AUX_NATIVE_WRITE) { + if (op == DP_AUX_NATIVE_WRITE) { int t; u8 buf[16]; @@ -1252,7 +1267,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, dpcd->data[p] = buf[t]; /* check for link training */ - if (p == DPCD_TRAINING_PATTERN_SET) + if (p == DP_TRAINING_PATTERN_SET) dp_aux_ch_ctl_link_training(dpcd, buf[t]); } @@ -1265,7 +1280,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, return 0; } - if (op == GVT_AUX_NATIVE_READ) { + if (op == DP_AUX_NATIVE_READ) { int idx, i, ret = 0; if ((addr + len + 1) >= DPCD_SIZE) { @@ -1962,10 +1977,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu, vgpu == gvt->scheduler.engine_owner[engine->id] || offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) || offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) { - mmio_hw_access_pre(gvt->gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gvt->gt); vgpu_vreg(vgpu, offset) = intel_uncore_read(gvt->gt->uncore, _MMIO(offset)); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); } return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); @@ -1988,7 +2005,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, * vGPU reset, it's set on D0->D3 on PCI config write, and cleared after * vGPU reset if in resuming. * In S0ix exit, the device power state also transite from D3 to D0 as - * S3 resume, but no vGPU reset (triggered by QEMU devic model). After + * S3 resume, but no vGPU reset (triggered by QEMU device model). After * S0ix exit, all engines continue to work. However the d3_entered * remains set which will break next vGPU reset logic (miss the expected * PPGTT invalidation). 
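Worth noting for the link-training hunk above: the private DPCD values that display.h used to pack two lanes per byte (DPCD_LANES_CR_DONE 0x11, DPCD_LANES_EQ_DONE 0x22, DPCD_SYMBOL_LOCKED 0x44, DPCD_SINK_IN_SYNC 0x03) are replaced by the standard per-lane bits from <drm/display/drm_dp.h>, with the upper lane spelled out as an explicit << 4 shift. The bytes written into the virtual DPCD are unchanged; a sketch verifying the equivalence (the static_asserts are illustrative, not part of the patch):

	#include <drm/display/drm_dp.h>

	/* lower-lane bit ORed with the upper-lane bit equals the old packed value */
	static_assert((DP_LANE_CR_DONE | DP_LANE_CR_DONE << 4) == 0x11);
	static_assert((DP_LANE_CHANNEL_EQ_DONE | DP_LANE_CHANNEL_EQ_DONE << 4) == 0x22);
	static_assert((DP_LANE_SYMBOL_LOCKED | DP_LANE_SYMBOL_LOCKED << 4) == 0x44);
	static_assert((DP_RECEIVE_PORT_0_STATUS | DP_RECEIVE_PORT_1_STATUS) == 0x03);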
@@ -2183,6 +2200,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, static int init_generic_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->gt->i915; + struct intel_display *display = &dev_priv->display; int ret; MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, @@ -2271,21 +2289,21 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ - MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_A), D_ALL, NULL, + MMIO_DH(TRANSCONF(display, TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_B), D_ALL, NULL, + MMIO_DH(TRANSCONF(display, TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_C), D_ALL, NULL, + MMIO_DH(TRANSCONF(display, TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_EDP), D_ALL, NULL, + MMIO_DH(TRANSCONF(display, TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(DSPSURF(dev_priv, PIPE_A), D_ALL, NULL, pri_surf_mmio_write); + MMIO_DH(DSPSURF(display, PIPE_A), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - MMIO_DH(DSPSURF(dev_priv, PIPE_B), D_ALL, NULL, pri_surf_mmio_write); + MMIO_DH(DSPSURF(display, PIPE_B), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - MMIO_DH(DSPSURF(dev_priv, PIPE_C), D_ALL, NULL, pri_surf_mmio_write); + MMIO_DH(DSPSURF(display, PIPE_C), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); @@ -3105,23 +3123,6 @@ int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } /** - * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be - * force-nopriv register - * - * @gvt: a GVT device - * @offset: register offset - * - * Returns: - * True if the register is in force-nonpriv whitelist; - * False if outside; - */ -bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, - unsigned int offset) -{ - return in_whitelist(offset); -} - -/** * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers * @vgpu: a vGPU * @offset: register offset @@ -3212,10 +3213,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt) int i, id; idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { - mmio_hw_access_pre(gvt->gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gvt->gt); for (i = 0; i < vgpu_fence_sz(vgpu); i++) intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i))); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); } } @@ -3236,8 +3239,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt) int id; idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { - mmio_hw_access_pre(gvt->gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gvt->gt); intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); } } diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index ca0fb126b02d..69830a5c49d3 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -53,8 +53,8 @@ #include "intel_gvt.h" #include "gvt.h" -MODULE_IMPORT_NS(DMA_BUF); -MODULE_IMPORT_NS(I915_GVT); +MODULE_IMPORT_NS("DMA_BUF"); +MODULE_IMPORT_NS("I915_GVT"); /* helper macros copied 
from vfio-pci */ #define VFIO_PCI_OFFSET_SHIFT 40 @@ -142,7 +142,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, int ret; /* - * We pin the pages one-by-one to avoid allocating a big arrary + * We pin the pages one-by-one to avoid allocating a big array * on stack to hold pfns. */ for (npage = 0; npage < total_pages; npage++) { diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 32ebacb078e8..3dc912aba80b 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -96,9 +96,6 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); -bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, - unsigned int offset); - int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, void *pdata, unsigned int bytes, bool is_read); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 273db14fd5fc..2f7208843367 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -53,7 +53,7 @@ struct engine_mmio { u32 value; }; -/* Raw offset is appened to each line for convenience. */ +/* Raw offset is append to each line for convenience. */ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ @@ -576,8 +576,8 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, /** * We are using raw mmio access wrapper to improve the - * performace for batch mmio read/write, so we need - * handle forcewake mannually. + * performance for batch mmio read/write, so we need + * handle forcewake manually. 
*/ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); switch_mmio(pre, next, engine); diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 908f910420c2..dbad4d853d3a 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) u8 *buf; struct opregion_header *header; struct vbt v; - const char opregion_signature[16] = OPREGION_SIGNATURE; gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | @@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) /* emulated opregion with VBT mailbox only */ buf = (u8 *)vgpu_opregion(vgpu)->va; header = (struct opregion_header *)buf; - memcpy(header->signature, opregion_signature, - sizeof(opregion_signature)); + + static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1); + memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature)); + header->size = 0x8; header->opregion_ver = 0x02000000; header->mboxes = MBOX_VBT; @@ -439,7 +440,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) gvt_vgpu_err("requesting SMI service\n"); return 0; } - /* ignore non 0->1 trasitions */ + /* ignore non 0->1 transitions */ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI] & SWSCI_SCI_TRIGGER) || !(swsci & SWSCI_SCI_TRIGGER)) { diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 60a65435556d..20c3cd807488 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -167,7 +167,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, return -ENXIO; if (unlikely(vgpu->failsafe)) { - /* Remove write protection to prevent furture traps. */ + /* Remove write protection to prevent future traps. 
*/ intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); } else { ret = page_track->handler(page_track, gpa, data, bytes); diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index c077fb4674f0..6c2d68e88266 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -286,8 +286,7 @@ static int tbs_sched_init(struct intel_gvt *gvt) return -ENOMEM; INIT_LIST_HEAD(&data->lru_runq_head); - hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - data->timer.function = tbs_timer_fn; + hrtimer_setup(&data->timer, tbs_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); data->period = GVT_DEFAULT_TIME_SLICE; data->gvt = gvt; @@ -448,6 +447,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; if (!vgpu_data->active) return; @@ -466,7 +466,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->current_vgpu = NULL; } - intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); for_each_engine(engine, vgpu->gvt->gt, id) { if (scheduler->engine_owner[engine->id] == vgpu) { @@ -475,6 +475,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index a5c8005ec484..6e87c10bc454 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -77,7 +77,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload) } /* - * when populating shadow ctx from guest, we should not overrride oa related + * When populating shadow ctx from guest, we should not override oa related * registers, so that they will not be overlapped by guest oa configs. Thus * made it possible to capture oa data from host for both host and guests. */ @@ -528,9 +528,10 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) int ret; list_for_each_entry(bb, &workload->shadow_bb, list) { - /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va + /* + * For privilege batch buffer and not wa_ctx, the bb_start_cmd_va * is only updated into ring_scan_buffer, not real ring address - * allocated in later copy_workload_to_ring_buffer. pls be noted + * allocated in later copy_workload_to_ring_buffer. Please be noted * shadow_ring_buffer_va is now pointed to real ring buffer va * in copy_workload_to_ring_buffer. */ @@ -546,7 +547,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) * here, rather than switch to shadow bb's gma * address, we directly use original batch buffer's * gma address, and send original bb to hardware - * directly + * directly. */ if (!bb->ppgtt) { i915_gem_ww_ctx_init(&ww, false); @@ -1052,7 +1053,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, struct intel_vgpu_workload *pos, *n; intel_engine_mask_t tmp; - /* free the unsubmited workloads in the queues. */ + /* free the unsubmitted workloads in the queues. 
*/ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { @@ -1774,7 +1775,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, } /** - * intel_vgpu_queue_workload - Qeue a vGPU workload + * intel_vgpu_queue_workload - Queue a vGPU workload * @workload: the workload to queue in */ void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 63c751ca4119..11260392234a 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -78,7 +78,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) * vGPU type name is defined as GVTg_Vx_y which contains the physical GPU * generation type (e.g V4 as BDW server, V5 as SKL server). * - * Depening on the physical SKU resource, we might see vGPU types like + * Depending on the physical SKU resource, we might see vGPU types like * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of * vGPU on same physical GPU depending on available resource. Each vGPU * type will have a different number of avail_instance to indicate how @@ -417,7 +417,7 @@ out_unlock: * the whole vGPU to default state as when it is created. This vGPU function * is required both for functionary and security concerns.The ultimate goal * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we - * assign a vGPU to a virtual machine we must isse such reset first. + * assign a vGPU to a virtual machine we must issue such reset first. * * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec. @@ -428,7 +428,7 @@ out_unlock: * * The parameter dev_level is to identify if we will do DMLR or GT reset. * The parameter engine_mask is to specific the engines that need to be - * resetted. If value ALL_ENGINES is given for engine_mask, it means + * reset. If value ALL_ENGINES is given for engine_mask, it means * the caller requests a full GT reset that we will reset all virtual * GPU engines. For FLR, engine_mask is ignored. */ |
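One more API migration worth calling out: both hrtimer users in gvt (setup_virtual_dp_monitor() in display.c and tbs_sched_init() in sched_policy.c) move from two-step hrtimer initialization to hrtimer_setup(), which takes the callback at setup time. The pattern, shown with the tbs scheduler names from the diff above:

	/* before: init, then assign the callback by hand */
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;

	/* after: one call; the callback can no longer be left unset */
	hrtimer_setup(&data->timer, tbs_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);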