path: root/drivers/gpu/drm/i915/gvt
author     Dave Airlie <airlied@redhat.com>  2017-07-13 04:21:16 +0300
committer  Dave Airlie <airlied@redhat.com>  2017-07-13 04:21:16 +0300
commit     caa164e37374609056c4d25255a6c7bef7a7f43b (patch)
tree       e70c7b9def52271ebdf9915fd397c1bb2fc488e1 /drivers/gpu/drm/i915/gvt
parent     39bf0bff4f179f109305433f83bdea5d1ca350ea (diff)
parent     eafbc20701243d21989c9c9aff5e5d9488e1890a (diff)
Merge tag 'drm-intel-next-fixes-2017-07-11' of git://anongit.freedesktop.org/git/drm-intel into drm-next
drm/i915 fixes for v4.13-rc1

* tag 'drm-intel-next-fixes-2017-07-11' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Make DP-MST connector info work
  drm/i915/gvt: Use fence error from GVT request for workload status
  drm/i915/gvt: remove scheduler_mutex in per-engine workload_thread
  drm/i915/gvt: Revert "drm/i915/gvt: Fix possible recursive locking issue"
  drm/i915/gvt: Audit the command buffer address
  drm/i915/gvt: Fix a memory leak in intel_gvt_init_gtt()
  drm/i915/fbdev: Check for existence of ifbdev->vma before operations
  drm/i915: Hold RPM wakelock while initializing OA buffer
  drm/i915/cnl: Fix the CURSOR_COEFF_MASK used in DDI Vswing Programming
  drm/i915/cfl: Fix Workarounds.
  drm/i915: Avoid undefined behaviour of "u32 >> 32"
  drm/i915: reintroduce VLV/CHV PFI programming power domain workaround
  drm/i915: Fix an error checking test
  drm/i915: Disable MSI for all pre-gen5
  drm/i915/gvt: Make function dpy_reg_mmio_readx safe
  drm/i915/gvt: Don't read ADPA_CRT_HOTPLUG_MONITOR from host
  drm/i915/gvt: Set initial PORT_CLK_SEL vreg for BDW
  drm/i915/gvt: Fix inconsistent locks holding sequence
  drm/i915/gvt: Fix possible recursive locking issue
Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c    | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c        |  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c   | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c      | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  | 28
6 files changed, 75 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 51241de5e7a7..713848c36349 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
s.workload = workload;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
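Both hunks above add the same guard: the guest graphics address range of the ring buffer is checked with intel_gvt_ggtt_validate_range() before the scanner starts walking commands, and an out-of-range buffer fails early with -EINVAL. A minimal user-space sketch of that check-before-scan pattern (the aperture bounds and helper names below are illustrative, not the GVT ones):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative aperture limits; the real bounds come from the vGPU's GGTT layout. */
#define APERTURE_BASE 0x00100000u
#define APERTURE_SIZE 0x00400000u

/* Reject a range that is empty, wraps around, or falls outside the aperture. */
static bool range_is_valid(uint32_t start, uint32_t size)
{
	if (size == 0 || start + size < start)
		return false;
	return start >= APERTURE_BASE &&
	       start + size <= APERTURE_BASE + APERTURE_SIZE;
}

static int scan_ring(uint32_t ring_start, uint32_t ring_size)
{
	/* Validate before touching the buffer, mirroring the added guard. */
	if (!range_is_valid(ring_start, ring_size))
		return -EINVAL;

	printf("scanning 0x%x..0x%x\n", ring_start, ring_start + ring_size);
	return 0;
}

int main(void)
{
	printf("in range:  %d\n", scan_ring(0x00100000u, 0x1000u));  /* 0 */
	printf("oversized: %d\n", scan_ring(0x004f0000u, 0x20000u)); /* -EINVAL */
	return 0;
}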
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e0261fcc5b50..2deb05f618fb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
@@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
+ if (IS_BROADWELL(dev_priv)) {
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+ ~PORT_CLK_SEL_MASK;
+ vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+ PORT_CLK_SEL_LCPLL_810;
+ }
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
@@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
+
+ /* Clear the host CRT status so the guest cannot detect the host CRT. */
+ if (IS_BROADWELL(dev_priv))
+ vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
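Each added block performs the same read-modify-write on the virtual PORT_CLK_SEL register: clear the clock-select field with the mask, then program LCPLL 810 MHz, and only on Broadwell. A stand-alone sketch of that masked-field update (the field layout and bit values here are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: a 3-bit clock-select field in the top bits of the register. */
#define CLK_SEL_MASK   (7u << 29)
#define CLK_SEL_LCPLL  (0u << 29)
#define CLK_SEL_SPLL   (3u << 29)

/* Clear the field, then OR in the new value -- the same shape as the vreg update. */
static uint32_t set_clk_sel(uint32_t reg, uint32_t sel)
{
	reg &= ~CLK_SEL_MASK;
	reg |= sel & CLK_SEL_MASK;
	return reg;
}

int main(void)
{
	uint32_t vreg = 0xdeadbeefu;

	printf("LCPLL: 0x%08x\n", set_clk_sel(vreg, CLK_SEL_LCPLL));
	printf("SPLL:  0x%08x\n", set_clk_sel(vreg, CLK_SEL_SPLL));
	return 0;
}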
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 66374dba3b1a..6166e34d892b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ __free_page(gvt->gtt.scratch_ggtt_page);
return ret;
}
}
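The fix above unwinds the scratch page mapping when setup_spt_oos() fails instead of leaking it on the early return. The usual kernel idiom is a goto-based unwind in reverse setup order; a compact user-space sketch of that shape (the resource and function names are placeholders):

#include <stdio.h>
#include <stdlib.h>

static int later_step_fails(void) { return -1; }

static int init_gtt(void)
{
	void *scratch_page = malloc(4096);	/* stands in for the mapped scratch page */
	int ret;

	if (!scratch_page)
		return -1;

	ret = later_step_fails();
	if (ret) {
		fprintf(stderr, "late step failed, unwinding earlier setup\n");
		goto err_free_scratch;	/* undo in reverse order of setup */
	}
	return 0;

err_free_scratch:
	free(scratch_page);
	return ret;
}

int main(void)
{
	printf("init_gtt() = %d\n", init_gtt());
	return 0;
}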
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1414d7e6148d..17febe830ff6 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- *(u32 *)p_data = (1 << 17);
- return 0;
-}
-
-static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- *(u32 *)p_data = 3;
- return 0;
-}
+ switch (offset) {
+ case 0xe651c:
+ case 0xe661c:
+ case 0xe671c:
+ case 0xe681c:
+ vgpu_vreg(vgpu, offset) = 1 << 17;
+ break;
+ case 0xe6c04:
+ vgpu_vreg(vgpu, offset) = 0x3;
+ break;
+ case 0xe6e1c:
+ vgpu_vreg(vgpu, offset) = 0x2f << 16;
+ break;
+ default:
+ return -EINVAL;
+ }
-static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- *(u32 *)p_data = (0x2f << 16);
+ read_vreg(vgpu, offset, p_data, bytes);
return 0;
}
@@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+ MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
@@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
- MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+ MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
PORTA_HOTPLUG_STATUS_MASK
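The three fixed-value read handlers collapse into one handler that switches on the MMIO offset, writes the canned value into the vreg, rejects unknown offsets with -EINVAL, and returns the result through the normal read path. A reduced sketch of that dispatch-by-offset shape (the offsets and values are copied from the hunk; the rest is simplified and not the GVT API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One handler for all of the display status registers, keyed on the offset. */
static int dpy_reg_read(unsigned int offset, void *p_data, unsigned int bytes)
{
	uint32_t val;

	switch (offset) {
	case 0xe651c: case 0xe661c: case 0xe671c: case 0xe681c:
		val = 1u << 17;
		break;
	case 0xe6c04:
		val = 0x3;
		break;
	case 0xe6e1c:
		val = 0x2fu << 16;
		break;
	default:
		return -EINVAL;	/* unknown offset: refuse instead of guessing */
	}

	memcpy(p_data, &val, bytes);
	return 0;
}

int main(void)
{
	uint32_t val = 0;

	dpy_reg_read(0xe6e1c, &val, sizeof(val));
	printf("0xe6e1c    -> 0x%08x\n", val);
	printf("bad offset -> %d\n", dpy_reg_read(0x1234, &val, sizeof(val)));
	return 0;
}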
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1ae0b4083ce1..fd0c85f9ef3c 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
struct device *dev = mdev_dev(vgpu->vdev.mdev);
unsigned long gfn;
- mutex_lock(&vgpu->vdev.cache_lock);
- while ((node = rb_first(&vgpu->vdev.cache))) {
+ for (;;) {
+ mutex_lock(&vgpu->vdev.cache_lock);
+ node = rb_first(&vgpu->vdev.cache);
+ if (!node) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ break;
+ }
dma = rb_entry(node, struct gvt_dma, node);
gvt_dma_unmap_iova(vgpu, dma->iova);
gfn = dma->gfn;
-
- vfio_unpin_pages(dev, &gfn, 1);
__gvt_cache_remove_entry(vgpu, dma);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ vfio_unpin_pages(dev, &gfn, 1);
}
- mutex_unlock(&vgpu->vdev.cache_lock);
}
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
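The rework takes cache_lock afresh for each cache entry and drops it before calling vfio_unpin_pages(), so the unpin never runs with cache_lock held; per the commit title, this is what straightens out the lock ordering. A small pthread sketch of that "pop one entry under the lock, process it outside" loop (a linked list stands in for the per-vGPU rb-tree cache, names invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { long gfn; struct entry *next; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *cache;	/* toy stand-in for the rb-tree cache */

static void unpin(long gfn)	/* may block or take other locks in the real code */
{
	printf("unpinning gfn %ld outside cache_lock\n", gfn);
}

static void cache_destroy(void)
{
	for (;;) {
		struct entry *e;
		long gfn;

		pthread_mutex_lock(&cache_lock);
		e = cache;
		if (!e) {
			pthread_mutex_unlock(&cache_lock);
			break;
		}
		cache = e->next;	/* remove the entry while still holding the lock */
		gfn = e->gfn;
		free(e);
		pthread_mutex_unlock(&cache_lock);

		unpin(gfn);		/* heavyweight call runs with the lock dropped */
	}
}

int main(void)
{
	for (long i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));
		e->gfn = i;
		e->next = cache;
		cache = e;
	}
	cache_destroy();
	return 0;
}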
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 488fdea348a9..4f7057d62d88 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- /* If the status is -EINPROGRESS means this workload
- * doesn't meet any issue during dispatching so when
- * get the SCHEDULE_OUT set the status to be zero for
- * good. If the status is NOT -EINPROGRESS means there
- * is something wrong happened during dispatching and
- * the status should not be set to zero
- */
- if (workload->status == -EINPROGRESS)
- workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
+ /* If this request caused a GPU hang, req->fence.error will
+ * be set to -EIO. Propagate -EIO into the workload status so
+ * that a request which hung the GPU does not trigger a
+ * context switch interrupt to the guest.
+ */
+ if (likely(workload->status == -EINPROGRESS)) {
+ if (workload->req->fence.error == -EIO)
+ workload->status = -EIO;
+ else
+ workload->status = 0;
+ }
+
i915_gem_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !vgpu->resetting) {
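With the SCHEDULE_OUT shortcut removed in the previous hunk, the status is now settled here: a workload still marked -EINPROGRESS completes as 0 unless its request's fence reports -EIO from a GPU hang, in which case the hang is propagated so no context-switch interrupt reaches the guest. A tiny sketch of that status-settling rule:

#include <errno.h>
#include <stdio.h>

/* Mirror of the decision in complete_current_workload(): an in-flight status
 * resolves to the fence error (-EIO on hang) or to success. */
static int settle_status(int workload_status, int fence_error)
{
	if (workload_status != -EINPROGRESS)
		return workload_status;	/* dispatch already failed: keep that error */
	return fence_error == -EIO ? -EIO : 0;
}

int main(void)
{
	printf("clean run:      %d\n", settle_status(-EINPROGRESS, 0));
	printf("GPU hang:       %d\n", settle_status(-EINPROGRESS, -EIO));
	printf("dispatch error: %d\n", settle_status(-ENOMEM, 0));
	return 0;
}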
@@ -464,8 +467,6 @@ struct workload_thread_param {
int ring_id;
};
-static DEFINE_MUTEX(scheduler_mutex);
-
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +498,6 @@ static int workload_thread(void *priv)
if (!workload)
break;
- mutex_lock(&scheduler_mutex);
-
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
@@ -537,9 +536,6 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
-
- mutex_unlock(&scheduler_mutex);
-
}
return 0;
}
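The remaining hunks drop the file-wide scheduler_mutex that serialized every workload_thread; as the commit title suggests, each engine already has its own thread working on its own queue, so a cross-engine lock is unnecessary. A toy sketch of independent per-engine workers (names and counts invented for illustration):

#include <pthread.h>
#include <stdio.h>

#define NUM_ENGINES 3

/* Each engine worker touches only its own state, so no shared lock is taken. */
static void *workload_thread(void *arg)
{
	long ring_id = (long)arg;

	for (int i = 0; i < 2; i++)
		printf("ring %ld: dispatching workload %d\n", ring_id, i);
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_ENGINES];

	for (long i = 0; i < NUM_ENGINES; i++)
		pthread_create(&threads[i], NULL, workload_thread, (void *)i);
	for (int i = 0; i < NUM_ENGINES; i++)
		pthread_join(threads[i], NULL);
	return 0;
}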