Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
 drivers/gpu/drm/i915/gvt/cfg_space.c    | 15
 drivers/gpu/drm/i915/gvt/dmabuf.c       |  1
 drivers/gpu/drm/i915/gvt/execlist.c     | 22
 drivers/gpu/drm/i915/gvt/gtt.c          | 24
 drivers/gpu/drm/i915/gvt/gvt.h          |  6
 drivers/gpu/drm/i915/gvt/handlers.c     | 10
 drivers/gpu/drm/i915/gvt/hypercall.h    |  1
 drivers/gpu/drm/i915/gvt/kvmgt.c        | 52
 drivers/gpu/drm/i915/gvt/mmio.c         | 42
 drivers/gpu/drm/i915/gvt/mmio_context.c | 10
 drivers/gpu/drm/i915/gvt/mpt.h          | 17
 drivers/gpu/drm/i915/gvt/opregion.c     | 98
 drivers/gpu/drm/i915/gvt/sched_policy.c | 21
 drivers/gpu/drm/i915/gvt/scheduler.c    | 19
 drivers/gpu/drm/i915/gvt/scheduler.h    |  1
 drivers/gpu/drm/i915/gvt/vgpu.c         |  5
 16 files changed, 214 insertions(+), 130 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 97bfc00d2a82..c62346fdc05d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
 		return 0;
 
-	if (map) {
-		vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
-						MEMREMAP_WC);
-		if (!vgpu->gm.aperture_va)
-			return -ENOMEM;
-	} else {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
-	}
-
 	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
 	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
 		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 					aperture_pa >> PAGE_SHIFT,
 					aperture_sz >> PAGE_SHIFT,
 					map);
-	if (ret) {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
+	if (ret)
 		return ret;
-	}
 
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2ab584f97dfb..2fb7b34ef561 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -472,7 +472,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		ret = PTR_ERR(dmabuf);
 		goto out_free_gem;
 	}
-	obj->base.dma_buf = dmabuf;
 
 	i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 769c1c24ae75..70494e394d2c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
-
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
-	enum intel_engine_id i;
+	unsigned int tmp;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		struct intel_vgpu_submission *s = &vgpu->submission;
-
-		kfree(s->ring_scan_buffer[i]);
-		s->ring_scan_buffer[i] = NULL;
-		s->ring_scan_buffer_size[i] = 0;
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		kfree(s->ring_scan_buffer[engine->id]);
+		s->ring_scan_buffer[engine->id] = NULL;
+		s->ring_scan_buffer_size[engine->id] = 0;
 	}
 }
 
@@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
-static int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu,
+			 unsigned long engine_mask)
 {
-	reset_execlist(vgpu, ALL_ENGINES);
+	reset_execlist(vgpu, engine_mask);
 	return 0;
 }
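Illustrative sketch, not part of the patch: with the reworked entry points above, callers pass an engine mask, so execlist state can be set up or torn down for a subset of engines. The mask helpers are the ones this series uses elsewhere (ENGINE_MASK() in handlers.c, ALL_ENGINES in scheduler.c and vgpu.c):

	/* single-engine teardown, e.g. when one ring leaves execlist mode */
	clean_execlist(vgpu, ENGINE_MASK(RCS));

	/* full teardown, e.g. on vGPU destroy */
	clean_execlist(vgpu, ALL_ENGINES);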
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index a529d2bd393c..8d5317d0122d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	struct intel_gvt_gtt_entry se, ge;
-	unsigned long i;
+	unsigned long gfn, i;
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
 	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 		for_each_present_guest_entry(spt, &ge, i) {
-			ret = gtt_entry_p2m(vgpu, &ge, &se);
-			if (ret)
-				goto fail;
+			gfn = ops->get_pfn(&ge);
+			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+				gtt_entry_p2m(vgpu, &ge, &se))
+				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
 			ppgtt_set_shadow_entry(spt, &se, i);
 		}
 		return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
-	unsigned long gma;
+	unsigned long gma, gfn;
 	struct intel_gvt_gtt_entry e, m;
 	int ret;
 
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			bytes);
 
 	if (ops->test_present(&e)) {
+		gfn = ops->get_pfn(&e);
+
+		/* one PTE update may be issued in multiple writes and the
+		 * first write may not construct a valid gfn
+		 */
+		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+			goto out;
+		}
+
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
 			gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
 
+out:
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
 	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7dc7a80213a8..c6197d990818 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -82,7 +82,6 @@ struct intel_gvt_device_info {
 struct intel_vgpu_gm {
 	u64 aperture_sz;
 	u64 hidden_sz;
-	void *aperture_va;
 	struct drm_mm_node low_gm_node;
 	struct drm_mm_node high_gm_node;
 };
@@ -127,7 +126,6 @@ struct intel_vgpu_irq {
 struct intel_vgpu_opregion {
 	bool mapped;
 	void *va;
-	void *va_gopregion;
 	u32 gfn[INTEL_GVT_OPREGION_PAGES];
 };
 
@@ -152,8 +150,8 @@ enum {
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu);
-	void (*clean)(struct intel_vgpu *vgpu);
+	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 };
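Illustrative sketch, not part of the patch: with the new prototypes in gvt.h, a submission backend supplies mask-aware init/clean alongside the already mask-aware reset. The execlist instance defined in execlist.c (outside this diff's context lines) has roughly this shape:

	const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
		.name = "execlist",
		.init = init_execlist,
		.reset = reset_execlist,
		.clean = clean_execlist,
	};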
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 92d6468daeee..9be639aa3b55 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
@@ -1523,11 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (!enable_execlist)
 			return 0;
 
-		if (s->active)
-			return 0;
-
 		ret = intel_vgpu_select_submission_ops(vgpu,
-				INTEL_VGPU_EXECLIST_SUBMISSION);
+				ENGINE_MASK(ring_id),
+				INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
 
@@ -2843,6 +2840,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index a1bd82feb827..f8e77e166246 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
 	int (*set_opregion)(void *vgpu);
 	int (*get_vfio_device)(void *vgpu);
 	void (*put_vfio_device)(void *vgpu);
+	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 554d1db1f3c8..909499b73d03 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -651,6 +651,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
 	return ret;
 }
 
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+{
+	return off >= vgpu_aperture_offset(vgpu) &&
+	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
+}
+
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+		void *buf, unsigned long count, bool is_write)
+{
+	void *aperture_va;
+
+	if (!intel_vgpu_in_aperture(vgpu, off) ||
+	    !intel_vgpu_in_aperture(vgpu, off + count)) {
+		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
+		return -EINVAL;
+	}
+
+	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+					ALIGN_DOWN(off, PAGE_SIZE),
+					count + offset_in_page(off));
+	if (!aperture_va)
+		return -EIO;
+
+	if (is_write)
+		memcpy(aperture_va + offset_in_page(off), buf, count);
+	else
+		memcpy(buf, aperture_va + offset_in_page(off), count);
+
+	io_mapping_unmap(aperture_va);
+
+	return 0;
+}
+
 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 		size_t count, loff_t *ppos, bool is_write)
 {
@@ -679,8 +712,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 				buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR2_REGION_INDEX:
-		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
-					buf, count, is_write);
+		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR1_REGION_INDEX:
 	case VFIO_PCI_BAR3_REGION_INDEX:
@@ -1575,6 +1607,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 	return PFN_DOWN(__pa(addr));
 }
 
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+	struct kvmgt_guest_info *info;
+	struct kvm *kvm;
+
+	if (!handle_valid(handle))
+		return false;
+
+	info = (struct kvmgt_guest_info *)handle;
+	kvm = info->kvm;
+
+	return kvm_is_visible_gfn(kvm, gfn);
+
+}
+
 struct intel_gvt_mpt kvmgt_mpt = {
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
@@ -1590,6 +1637,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
 	.set_opregion = kvmgt_set_opregion,
 	.get_vfio_device = kvmgt_get_vfio_device,
 	.put_vfio_device = kvmgt_put_vfio_device,
+	.is_valid_gfn = kvmgt_is_valid_gfn,
 };
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
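Illustrative sketch, not part of the patch: a BAR2 access from the mdev path now lands in intel_vgpu_aperture_rw(), which maps only the touched pages through the host GGTT io_mapping instead of keeping a persistent memremap() of the whole aperture. A caller-side sketch (the offset here is hypothetical; it must stay inside the vGPU's aperture window):

	u32 data;
	uint64_t off = vgpu_aperture_offset(vgpu);	/* first byte of the window */
	int err;

	/* read 4 bytes; -EINVAL if [off, off + count) leaves the window */
	err = intel_vgpu_aperture_rw(vgpu, off, &data, sizeof(data), false);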
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 562b5ad857a4..5c869e3fdf3b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
 
-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
-	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
-			    void *pdata, unsigned int size, bool is_read)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 offset = gpa - aperture_gpa;
-
-	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
-		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
-			     offset, size);
-		return -EINVAL;
-	}
-
-	if (!vgpu->gm.aperture_va) {
-		gvt_vgpu_err("BAR is not enabled\n");
-		return -ENXIO;
-	}
-
-	if (is_read)
-		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
-	else
-		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
-	return 0;
-}
-
 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes, bool read)
 {
@@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	}
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 74834395dd89..73ad6e90e49d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -80,7 +80,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
 	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-	{ /* Terminated */ }
+	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 
 static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
@@ -146,7 +146,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
 	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
-	{ /* Terminated */ }
+	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 
 static struct {
@@ -167,7 +167,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	};
 	int ring_id, i;
 
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
 		offset.reg = regs[ring_id];
 		for (i = 0; i < 64; i++) {
 			gen9_render_mocs.control_table[ring_id][i] =
@@ -310,8 +310,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		switch_mocs(pre, next, ring_id);
 
-	mmio = dev_priv->gvt->engine_mmio_list;
-	while (i915_mmio_reg_offset((mmio++)->reg)) {
+	for (mmio = dev_priv->gvt->engine_mmio_list;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
 		if (mmio->ring_id != ring_id)
 			continue;
 		// save
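Illustrative sketch, not part of the patch: both mmio_context.c fixes make loop bounds explicit. load_render_mocs() now stops at ARRAY_SIZE(regs) instead of I915_NUM_ENGINES, so a platform with more engines than MOCS tables cannot index past regs[], and the register-list walk stops on a visible sentinel:

	/* the {RCS, INVALID_MMIO_REG, 0, false} terminator makes the stop
	 * condition checkable with i915_mmio_reg_valid() */
	for (mmio = dev_priv->gvt->engine_mmio_list;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
		/* ... save/restore this register ... */
	}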
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } + break; default: gvt_vgpu_err("not supported hypervisor\n"); @@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) return 0; } - func = GVT_OPREGION_FUNC(*scic); - subfunc = GVT_OPREGION_SUBFUNC(*scic); - if (!querying_capabilities(*scic)) { + func = GVT_OPREGION_FUNC(scic); + subfunc = GVT_OPREGION_SUBFUNC(scic); + if (!querying_capabilities(scic)) { gvt_vgpu_err("requesting runtime service: func \"%s\"," " subfunc \"%s\"\n", opregion_func_name(func), @@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) * emulate exit status of function call, '0' means * "failure, generic, unsupported or unknown cause" */ - *scic &= ~OPREGION_SCIC_EXIT_MASK; - return 0; + scic &= ~OPREGION_SCIC_EXIT_MASK; + goto out; + } + + scic = 0; + parm = 0; + +out: + switch (intel_gvt_host.hypervisor_type) { + case INTEL_GVT_HYPERVISOR_XEN: + *((u32 *)vgpu_opregion(vgpu)->va + + INTEL_GVT_OPREGION_SCIC) = scic; + *((u32 *)vgpu_opregion(vgpu)->va + + INTEL_GVT_OPREGION_PARM) = parm; + break; + case INTEL_GVT_HYPERVISOR_KVM: + ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, + &scic, sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } + + ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, + &parm, sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } + + break; + default: + gvt_vgpu_err("not supported hypervisor\n"); + return -EINVAL; } - *scic = 0; - *parm = 0; return 0; } diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index eea1a2f92099..cc1ce361cd76 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) struct vgpu_sched_data { struct list_head lru_list; struct intel_vgpu *vgpu; + bool active; ktime_t sched_in_time; ktime_t sched_out_time; @@ -308,8 +309,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) { + struct intel_gvt *gvt = vgpu->gvt; + struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; + kfree(vgpu->sched_data); vgpu->sched_data = NULL; + + /* this vgpu id has been removed */ + if (idr_is_empty(&gvt->vgpu_idr)) + hrtimer_cancel(&sched_data->timer); } static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) @@ -325,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), sched_data->period), HRTIMER_MODE_ABS); + vgpu_data->active = true; } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) @@ -332,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) struct vgpu_sched_data *vgpu_data = vgpu->sched_data; list_del_init(&vgpu_data->lru_list); + vgpu_data->active = false; } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { @@ -367,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu) void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) { - gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); + struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - 
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index eea1a2f92099..cc1ce361cd76 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
+	bool active;
 
 	ktime_t sched_in_time;
 	ktime_t sched_out_time;
@@ -308,8 +309,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
+
+	/* this vgpu id has been removed */
+	if (idr_is_empty(&gvt->vgpu_idr))
+		hrtimer_cancel(&sched_data->timer);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -325,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
 			sched_data->period), HRTIMER_MODE_ABS);
+	vgpu_data->active = true;
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -332,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	list_del_init(&vgpu_data->lru_list);
+	vgpu_data->active = false;
 }
 
 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -367,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
-	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	if (!vgpu_data->active) {
+		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	}
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -382,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
 	int ring_id;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+	if (!vgpu_data->active)
+		return;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0056638b0c16..b55b3580ca1d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	intel_vgpu_select_submission_ops(vgpu, 0);
+	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1079,6 +1079,7 @@ out_shadow_ctx:
 *
 */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1091,21 +1092,21 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;
 
-	if (s->active) {
-		s->ops->clean(vgpu);
-		s->active = false;
-		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
-				vgpu->id, s->ops->name);
-	}
+	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+		return -EINVAL;
+
+	if (s->active)
+		s->ops->clean(vgpu, engine_mask);
 
 	if (interface == 0) {
 		s->ops = NULL;
 		s->virtual_submission_interface = 0;
-		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
 		return 0;
 	}
 
-	ret = ops[interface]->init(vgpu);
+	ret = ops[interface]->init(vgpu, engine_mask);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3de77dfa7c59..ff175a98b19e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 4688619f6a1c..b87b19d8443c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -258,6 +258,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 
 	intel_gvt_debugfs_remove_vgpu(vgpu);
 	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	if (idr_is_empty(&gvt->vgpu_idr))
+		intel_gvt_clean_irq(gvt);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
 	intel_vgpu_clean_display(vgpu);
@@ -518,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	intel_vgpu_reset_submission(vgpu, resetting_eng);
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
-		intel_vgpu_select_submission_ops(vgpu, 0);
-
+		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
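Illustrative sketch, not part of the patch: after this series, interface 0 means "drop the submission ops entirely" and is only legal together with ALL_ENGINES (the new WARN_ON in scheduler.c enforces it), while (re)selecting an interface may target a single engine:

	/* full teardown, as in intel_vgpu_clean_submission() */
	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

	/* per-engine selection, as in ring_mode_mmio_write() */
	intel_vgpu_select_submission_ops(vgpu, ENGINE_MASK(ring_id),
					 INTEL_VGPU_EXECLIST_SUBMISSION);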