author     Jani Nikula <jani.nikula@intel.com>    2017-06-30 12:49:44 +0300
committer  Jani Nikula <jani.nikula@intel.com>    2017-06-30 12:49:45 +0300
commit     507ad75736f896f705663f22d8fd045cac99afae (patch)
tree       fd7ba1f292303f1a8c96c7c6ce0be1cfcc0c16dc /drivers/gpu
parent     bdbbf7d619d1fd2f1fa9eb529b7817e4faf73f5e (diff)
parent     5cd82b757795228516bf60a0552d1a40fa8adeb2 (diff)
download   linux-507ad75736f896f705663f22d8fd045cac99afae.tar.xz
Merge tag 'gvt-fixes-2017-06-29' of https://github.com/01org/gvt-linux into drm-intel-next-fixes
gvt-fixes-2017-06-29
- two race fixes for VFIO locks from Chuanxiao
- virtual display fix for BDW from Xiong
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170629065424.kxopjbvntuakbyz2@zhen-hp.sh.intel.com
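
Note on the VFIO lock fixes: the kvmgt.c hunks below stop calling vfio_unpin_pages() while cache_lock is held. gvt_cache_mark_remove() now only detaches the entry and queues it on a spinlock-protected unpin_list, and the intel_vgpu_unpin_work work item drains that list and does the actual unpinning afterwards, with none of the notifier path's locks held. The standalone sketch below only illustrates that deferral pattern; it is not the GVT-g code itself, and the demo_* names and the demo_entry type are invented for the example.

/* Sketch of deferring a slow release step to a work item (assumed names). */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_entry {
        unsigned long gfn;
        struct list_head list;
};

struct demo_cache {
        spinlock_t unpin_lock;          /* protects unpin_list */
        struct list_head unpin_list;    /* entries waiting to be released */
        struct work_struct unpin_work;
};

/* Work handler: runs later, with none of the queuing path's locks held. */
static void demo_unpin_work_fn(struct work_struct *work)
{
        struct demo_cache *cache =
                container_of(work, struct demo_cache, unpin_work);
        struct demo_entry *entry;

        for (;;) {
                spin_lock(&cache->unpin_lock);
                if (list_empty(&cache->unpin_list)) {
                        spin_unlock(&cache->unpin_lock);
                        break;
                }
                entry = list_first_entry(&cache->unpin_list,
                                         struct demo_entry, list);
                list_del(&entry->list);
                spin_unlock(&cache->unpin_lock);

                /* The slow call (vfio_unpin_pages() in kvmgt.c) would go
                 * here, outside every lock taken by the notifier path. */
                kfree(entry);
        }
}

static void demo_cache_init(struct demo_cache *cache)
{
        spin_lock_init(&cache->unpin_lock);
        INIT_LIST_HEAD(&cache->unpin_list);
        INIT_WORK(&cache->unpin_work, demo_unpin_work_fn);
}

/* Queuing path (the notifier in kvmgt.c): only queue and kick the worker. */
static void demo_mark_remove(struct demo_cache *cache, struct demo_entry *entry)
{
        spin_lock(&cache->unpin_lock);
        list_add_tail(&entry->list, &cache->unpin_list);
        spin_unlock(&cache->unpin_lock);

        schedule_work(&cache->unpin_work);
}

Splitting the work this way means the expensive call can never be reached with the cache lock held, which is what removes the lock-ordering race against the VFIO notifier.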
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c  | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h      |  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c    | 69
4 files changed, 99 insertions, 32 deletions
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e0261fcc5b50..2deb05f618fb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                         (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                         (PORT_B << TRANS_DDI_PORT_SHIFT) |
                         TRANS_DDI_FUNC_ENABLE);
+               if (IS_BROADWELL(dev_priv)) {
+                       vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+                               ~PORT_CLK_SEL_MASK;
+                       vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+                               PORT_CLK_SEL_LCPLL_810;
+               }
                vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
                vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
                vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                         (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                         (PORT_C << TRANS_DDI_PORT_SHIFT) |
                         TRANS_DDI_FUNC_ENABLE);
+               if (IS_BROADWELL(dev_priv)) {
+                       vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+                               ~PORT_CLK_SEL_MASK;
+                       vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+                               PORT_CLK_SEL_LCPLL_810;
+               }
                vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
                vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
                vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
@@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                         (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                         (PORT_D << TRANS_DDI_PORT_SHIFT) |
                         TRANS_DDI_FUNC_ENABLE);
+               if (IS_BROADWELL(dev_priv)) {
+                       vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+                               ~PORT_CLK_SEL_MASK;
+                       vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+                               PORT_CLK_SEL_LCPLL_810;
+               }
                vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
                vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
                vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
@@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |=
                        DDI_INIT_DISPLAY_DETECTED;
        }
+
+       /* Clear host CRT status, so guest couldn't detect this host CRT. */
+       if (IS_BROADWELL(dev_priv))
+               vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..28d817e96e58 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -182,6 +182,9 @@ struct intel_vgpu {
                struct kvm *kvm;
                struct work_struct release_work;
                atomic_t released;
+               struct work_struct unpin_work;
+               spinlock_t unpin_lock; /* To protect unpin_list */
+               struct list_head unpin_list;
        } vdev;
 #endif
 };
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1414d7e6148d..17febe830ff6 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
-       *(u32 *)p_data = (1 << 17);
-       return 0;
-}
-
-static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
-{
-       *(u32 *)p_data = 3;
-       return 0;
-}
+       switch (offset) {
+       case 0xe651c:
+       case 0xe661c:
+       case 0xe671c:
+       case 0xe681c:
+               vgpu_vreg(vgpu, offset) = 1 << 17;
+               break;
+       case 0xe6c04:
+               vgpu_vreg(vgpu, offset) = 0x3;
+               break;
+       case 0xe6e1c:
+               vgpu_vreg(vgpu, offset) = 0x2f << 16;
+               break;
+       default:
+               return -EINVAL;
+       }
 
-static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
-{
-       *(u32 *)p_data = (0x2f << 16);
+       read_vreg(vgpu, offset, p_data, bytes);
        return 0;
 }
 
@@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
                dp_aux_ch_ctl_mmio_write);
 
-       MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+       MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
 
        MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
        MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
@@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
        MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
        MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
-       MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
-       MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+       MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
+       MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
 
        MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
                PORTA_HOTPLUG_STATUS_MASK
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1ae0b4083ce1..75a6e1d8af0d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -78,6 +78,7 @@ struct gvt_dma {
        struct rb_node node;
        gfn_t gfn;
        unsigned long iova;
+       struct list_head list;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -166,6 +167,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 
        new->gfn = gfn;
        new->iova = iova;
+       INIT_LIST_HEAD(&new->list);
 
        mutex_lock(&vgpu->vdev.cache_lock);
        while (*link) {
@@ -197,26 +199,52 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
        kfree(entry);
 }
 
-static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+static void intel_vgpu_unpin_work(struct work_struct *work)
 {
+       struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
+                       vdev.unpin_work);
        struct device *dev = mdev_dev(vgpu->vdev.mdev);
        struct gvt_dma *this;
-       unsigned long g1;
-       int rc;
+       unsigned long gfn;
+
+       for (;;) {
+               spin_lock(&vgpu->vdev.unpin_lock);
+               if (list_empty(&vgpu->vdev.unpin_list)) {
+                       spin_unlock(&vgpu->vdev.unpin_lock);
+                       break;
+               }
+               this = list_first_entry(&vgpu->vdev.unpin_list,
+                               struct gvt_dma, list);
+               list_del(&this->list);
+               spin_unlock(&vgpu->vdev.unpin_lock);
+
+               gfn = this->gfn;
+               vfio_unpin_pages(dev, &gfn, 1);
+               kfree(this);
+       }
+}
+
+static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+       struct gvt_dma *this;
 
        mutex_lock(&vgpu->vdev.cache_lock);
        this = __gvt_cache_find(vgpu, gfn);
        if (!this) {
                mutex_unlock(&vgpu->vdev.cache_lock);
-               return;
+               return false;
        }
-
-       g1 = gfn;
        gvt_dma_unmap_iova(vgpu, this->iova);
-       rc = vfio_unpin_pages(dev, &g1, 1);
-       WARN_ON(rc != 1);
-       __gvt_cache_remove_entry(vgpu, this);
+       /* remove this from rb tree */
+       rb_erase(&this->node, &vgpu->vdev.cache);
        mutex_unlock(&vgpu->vdev.cache_lock);
+
+       /* put this to the unpin_list */
+       spin_lock(&vgpu->vdev.unpin_lock);
+       list_move_tail(&this->list, &vgpu->vdev.unpin_list);
+       spin_unlock(&vgpu->vdev.unpin_lock);
+
+       return true;
 }
 
 static void gvt_cache_init(struct intel_vgpu *vgpu)
@@ -232,16 +260,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
        struct device *dev = mdev_dev(vgpu->vdev.mdev);
        unsigned long gfn;
 
-       mutex_lock(&vgpu->vdev.cache_lock);
-       while ((node = rb_first(&vgpu->vdev.cache))) {
+       for (;;) {
+               mutex_lock(&vgpu->vdev.cache_lock);
+               node = rb_first(&vgpu->vdev.cache);
+               if (!node) {
+                       mutex_unlock(&vgpu->vdev.cache_lock);
+                       break;
+               }
                dma = rb_entry(node, struct gvt_dma, node);
                gvt_dma_unmap_iova(vgpu, dma->iova);
                gfn = dma->gfn;
-
-               vfio_unpin_pages(dev, &gfn, 1);
                __gvt_cache_remove_entry(vgpu, dma);
+               mutex_unlock(&vgpu->vdev.cache_lock);
+               vfio_unpin_pages(dev, &gfn, 1);
        }
-       mutex_unlock(&vgpu->vdev.cache_lock);
 }
 
 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
@@ -453,6 +485,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        }
 
        INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+       INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
+       spin_lock_init(&vgpu->vdev.unpin_lock);
+       INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
 
        vgpu->vdev.mdev = mdev;
        mdev_set_drvdata(mdev, vgpu);
@@ -482,6 +517,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
        struct intel_vgpu *vgpu = container_of(nb,
                                        struct intel_vgpu,
                                        vdev.iommu_notifier);
+       bool sched_unmap = false;
 
        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
                struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -491,7 +527,10 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
                end_gfn = gfn + unmap->size / PAGE_SIZE;
 
                while (gfn < end_gfn)
-                       gvt_cache_remove(vgpu, gfn++);
+                       sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
+
+               if (sched_unmap)
+                       schedule_work(&vgpu->vdev.unpin_work);
        }
 
        return NOTIFY_OK;
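
On the display side, handlers.c folds dpy_reg_mmio_read_2() and dpy_reg_mmio_read_3() into a single dpy_reg_mmio_read() that switches on the register offset, refreshes the backing virtual register, and then copies it back to the caller via read_vreg(). The standalone sketch below only mirrors that switch-on-offset pattern; the demo_* names are invented, while the offsets and values are taken from the diff above.

/* Userspace sketch of a consolidated read handler backed by a vreg array. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_VREG_SIZE 0x100000u          /* covers the 0xe6xxx offsets used */

static uint8_t demo_vreg[DEMO_VREG_SIZE]; /* backing "virtual register" space */

static int demo_dpy_reg_read(unsigned int offset, void *p_data,
                             unsigned int bytes)
{
        uint32_t val;

        /* Refresh the emulated register, as the combined handler does. */
        switch (offset) {
        case 0xe651c:
        case 0xe661c:
        case 0xe671c:
        case 0xe681c:
                val = 1u << 17;
                break;
        case 0xe6c04:
                val = 0x3;
                break;
        case 0xe6e1c:
                val = 0x2fu << 16;
                break;
        default:
                return -EINVAL;   /* offset not emulated by this handler */
        }

        memcpy(demo_vreg + offset, &val, sizeof(val));
        memcpy(p_data, demo_vreg + offset, bytes);  /* read_vreg() equivalent */
        return 0;
}

int main(void)
{
        uint32_t v = 0;

        if (demo_dpy_reg_read(0xe6e1c, &v, sizeof(v)) == 0)
                printf("0xe6e1c reads back 0x%08x\n", (unsigned int)v);
        return 0;
}

Keeping the value in the backing register array, rather than only writing it into the caller's buffer, means later reads of the same offset through the generic vreg path see the same status the handler last reported.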