Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c |  36
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c   |  78
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c    |  66
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c         | 120
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h         |   5
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h         |   9
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c    | 103
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c       |  86
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c        |  84
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h        |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c    |  10
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h         |   3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c   |  14
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h   |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c        | 162
17 files changed, 495 insertions, 299 deletions
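The thrust of this series is visible in the hunks below: cfg-space, MMIO, and GGTT setup gain dedicated init/reset helpers, and every reset path (device model level reset and GT reset) is funneled through a single intel_gvt_reset_vgpu_locked() that runs with the gvt lock held but drops it while waiting for the vGPU to go idle. Here is a minimal userspace sketch of that locked/unlocked split, using a pthread mutex as a stand-in for the kernel's gvt lock (all names and the ALL_ENGINES_SKETCH constant are illustrative, not the driver's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define ALL_ENGINES_SKETCH (~0u)	/* stand-in for the driver's ALL_ENGINES */

static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for intel_gvt_wait_vgpu_idle(); it must run unlocked because
 * completing in-flight workloads also needs the lock. */
static void wait_vgpu_idle(void)
{
}

/* Worker: caller holds gvt_lock; the unlock/relock window mirrors the
 * one in the intel_gvt_reset_vgpu_locked() hunk below. */
static void reset_vgpu_locked(bool dmlr, unsigned int engine_mask)
{
	pthread_mutex_unlock(&gvt_lock);
	wait_vgpu_idle();
	pthread_mutex_lock(&gvt_lock);

	if (dmlr || engine_mask == ALL_ENGINES_SKETCH)
		printf("full reset: gtt, resources, mmio%s\n",
		       dmlr ? ", cfg space" : "");
}

/* Public entry for a function-level reset: takes the lock itself. */
static void reset_vgpu(void)
{
	pthread_mutex_lock(&gvt_lock);
	reset_vgpu_locked(true, 0);
	pthread_mutex_unlock(&gvt_lock);
}

int main(void)
{
	reset_vgpu();
	return 0;
}

The _locked suffix convention mirrors the kernel's: callers that already hold the lock call the worker directly, while the unlocked wrapper exists for external callers.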
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 0d41ebc4aea6..f7bce8603958 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -37,13 +37,6 @@ #include "i915_drv.h" #include "gvt.h" -#define MB_TO_BYTES(mb) ((mb) << 20ULL) -#define BYTES_TO_MB(b) ((b) >> 20ULL) - -#define HOST_LOW_GM_SIZE MB_TO_BYTES(128) -#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) -#define HOST_FENCE 4 - static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) { struct intel_gvt *gvt = vgpu->gvt; @@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, POSTING_READ(fence_reg_lo); } +static void _clear_vgpu_fence(struct intel_vgpu *vgpu) +{ + int i; + + for (i = 0; i < vgpu_fence_sz(vgpu); i++) + intel_vgpu_write_fence(vgpu, i, 0); +} + static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; @@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); + _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; - intel_vgpu_write_fence(vgpu, i, 0); list_add_tail(&reg->link, &dev_priv->mm.fence_list); } @@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) continue; list_del(pos); vgpu->fence.regs[i] = reg; - intel_vgpu_write_fence(vgpu, i, 0); if (++i == vgpu_fence_sz(vgpu)) break; } if (i != vgpu_fence_sz(vgpu)) goto out_free_fence; + _clear_vgpu_fence(vgpu); + mutex_unlock(&dev_priv->drm.struct_mutex); intel_runtime_pm_put(dev_priv); return 0; @@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) } /** + * intel_vgpu_reset_resource - reset resource state owned by a vGPU + * @vgpu: a vGPU + * + * This function is used to reset resource state owned by a vGPU. 
+ * + */ +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + intel_runtime_pm_get(dev_priv); + _clear_vgpu_fence(vgpu); + intel_runtime_pm_put(dev_priv); +} + +/** * intel_alloc_vgpu_resource - allocate HW resource for a vGPU * @vgpu: vGPU * @param: vGPU creation params diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index db516382a4d4..4a6a2ed65732 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, u8 changed = old ^ new; int ret; + memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); if (!(changed & PCI_COMMAND_MEMORY)) return 0; @@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, return ret; } - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); return 0; } @@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, if (WARN_ON(bytes > 4)) return -EINVAL; - if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ)) + if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ)) return -EINVAL; /* First check if it's PCI_COMMAND */ @@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } return 0; } + +/** + * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU + * + * @vgpu: a vGPU + * @primary: is the vGPU presented as primary + * + */ +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + u16 *gmch_ctl; + int i; + + memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, + info->cfg_space_size); + + if (!primary) { + vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + } + + /* Show the guest that there isn't any stolen memory. */ + gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); + *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); + + intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, + gvt_aperture_pa_base(gvt), true); + + vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO + | PCI_COMMAND_MEMORY + | PCI_COMMAND_MASTER); + /* + * Clear the upper 32 bits of the BARs and let the guest assign new values + */ + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); + + for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { + vgpu->cfg_space.bar[i].size = pci_resource_len( + gvt->dev_priv->drm.pdev, i * 2); + vgpu->cfg_space.bar[i].tracked = false; + } +} + +/** + * intel_vgpu_reset_cfg_space - reset vGPU configuration space + * + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) +{ + u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; + bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != + INTEL_GVT_PCI_CLASS_VGA_OTHER; + + if (cmd & PCI_COMMAND_MEMORY) { + trap_gttmmio(vgpu, false); + map_aperture(vgpu, false); + } + + /** + * Currently we only do such reset when the vGPU is not + * owned by any VM, so we simply restore the entire cfg + * space to its default value. 
+ */ + intel_vgpu_init_cfg_space(vgpu, primary); +} diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d26a092c70e8..e4563984cb1e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -481,7 +481,6 @@ struct parser_exec_state { (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) static unsigned long bypass_scan_mask = 0; -static bool bypass_batch_buffer_scan = true; /* ring ALL, type = 0 */ static struct sub_op_bits sub_op_mi[] = { @@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) { struct intel_gvt *gvt = s->vgpu->gvt; - if (bypass_batch_buffer_scan) - return 0; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { /* BDW decides privilege based on address space */ if (cmd_val(s, 0) & (1 << 8)) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f32bb6f6495c..34083731669d 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload) #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) - -#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) -#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) -static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj, - unsigned long add, int gmadr_bytes) -{ - if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) - return -1; - - *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add & - BATCH_BUFFER_ADDR_MASK; - if (gmadr_bytes == 8) { - *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) = - add & BATCH_BUFFER_ADDR_HIGH_MASK; - } - - return 0; -} - static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { - int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + struct intel_shadow_bb_entry *entry_obj; /* pin the gem object to ggtt */ - if (!list_empty(&workload->shadow_bb)) { - struct intel_shadow_bb_entry *entry_obj = - list_first_entry(&workload->shadow_bb, - struct intel_shadow_bb_entry, - list); - struct intel_shadow_bb_entry *temp; + list_for_each_entry(entry_obj, &workload->shadow_bb, list) { + struct i915_vma *vma; - list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, - list) { - struct i915_vma *vma; - - vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, - 4, 0); - if (IS_ERR(vma)) { - gvt_err("Cannot pin\n"); - return; - } - - /* FIXME: we are not tracking our pinned VMA leaving it - * up to the core to fix up the stray pin_count upon - * free. - */ - - /* update the relocate gma with shadow batch buffer*/ - set_gma_to_bb_cmd(entry_obj, - i915_ggtt_offset(vma), - gmadr_bytes); + vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); + if (IS_ERR(vma)) { + gvt_err("Cannot pin\n"); + return; } + + /* FIXME: we are not tracking our pinned VMA leaving it + * up to the core to fix up the stray pin_count upon + * free. 
+ */ + + /* update the relocate gma with shadow batch buffer*/ + entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); + if (gmadr_bytes == 8) + entry_obj->bb_start_cmd_va[2] = 0; } } @@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&vgpu->workload_q_head[i]); } - vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", + vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", sizeof(struct intel_vgpu_workload), 0, SLAB_HWCACHE_ALIGN, NULL); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 7eaaf1c9ed2b..47dec4acf7ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -240,15 +240,8 @@ static inline int get_pse_type(int type) static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; - u64 pte; -#ifdef readq - pte = readq(addr); -#else - pte = ioread32(addr); - pte |= (u64)ioread32(addr + 4) << 32; -#endif - return pte; + return readq(addr); } static void write_pte64(struct drm_i915_private *dev_priv, @@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv, { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; -#ifdef writeq writeq(pte, addr); -#else - iowrite32((u32)pte, addr); - iowrite32(pte >> 32, addr + 4); -#endif + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); } @@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) info->gtt_entry_size; mem = kzalloc(mm->has_shadow_page_table ? mm->page_table_entry_size * 2 - : mm->page_table_entry_size, - GFP_ATOMIC); + : mm->page_table_entry_size, GFP_KERNEL); if (!mem) return -ENOMEM; mm->virtual_page_table = mem; @@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm; int ret; - mm = kzalloc(sizeof(*mm), GFP_ATOMIC); + mm = kzalloc(sizeof(*mm), GFP_KERNEL); if (!mm) { ret = -ENOMEM; goto fail; @@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; - struct page *scratch_pt; + void *scratch_pt; unsigned long mfn; int i; - void *p; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; - scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { gvt_err("fail to allocate scratch page\n"); return -ENOMEM; } - p = kmap_atomic(scratch_pt); - mfn = intel_gvt_hypervisor_virt_to_mfn(p); + mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); - kunmap_atomic(p); - __free_page(scratch_pt); + gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); + free_page((unsigned long)scratch_pt); return -EFAULT; } gtt->scratch_pt[type].page_mfn = mfn; - gtt->scratch_pt[type].page = scratch_pt; + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", vgpu->id, type, mfn); @@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * scratch_pt[type] indicate the scratch pt/scratch page used by the * 'type' pt. * e.g. 
scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. */ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { @@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, se.val64 |= PPAT_CACHED_INDEX; for (i = 0; i < page_entry_num; i++) - ops->set_entry(p, &se, i, false, 0, vgpu); + ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); } - kunmap_atomic(p); - return 0; } @@ -1998,6 +1981,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&gtt->oos_page_list_head); INIT_LIST_HEAD(&gtt->post_shadow_list_head); + intel_vgpu_reset_ggtt(vgpu); + ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, NULL, 1, 0); if (IS_ERR(ggtt_mm)) { @@ -2206,6 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; + void *page; gvt_dbg_core("init gtt\n"); @@ -2218,6 +2204,20 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) return -ENODEV; } + page = (void *)get_zeroed_page(GFP_KERNEL); + if (!page) { + gvt_err("fail to allocate scratch ggtt page\n"); + return -ENOMEM; + } + gvt->gtt.scratch_ggtt_page = virt_to_page(page); + + gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); + if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { + gvt_err("fail to translate scratch ggtt page\n"); + __free_page(gvt->gtt.scratch_ggtt_page); + return -EFAULT; + } + if (enable_out_of_sync) { ret = setup_spt_oos(gvt); if (ret) { @@ -2239,6 +2239,68 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) */ void intel_gvt_clean_gtt(struct intel_gvt *gvt) { + __free_page(gvt->gtt.scratch_ggtt_page); + if (enable_out_of_sync) clean_spt_oos(gvt); } + +/** + * intel_vgpu_reset_ggtt - reset the GGTT entry + * @vgpu: a vGPU + * + * This function is called at the vGPU create stage + * to reset all the GGTT entries. + * + */ +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + u32 index; + u32 offset; + u32 num_entries; + struct intel_gvt_gtt_entry e; + + memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); + e.type = GTT_TYPE_GGTT_PTE; + ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); + e.val64 |= _PAGE_PRESENT; + + index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; + num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; + for (offset = 0; offset < num_entries; offset++) + ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); + + index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; + num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; + for (offset = 0; offset < num_entries; offset++) + ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); +} + +/** + * intel_vgpu_reset_gtt - reset all GTT related status + * @vgpu: a vGPU + * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset + * + * This function is called from the vfio core to reset all + * GTT related status, including GGTT, PPGTT, scratch page. 
+ * + */ +void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) +{ + int i; + + ppgtt_free_all_shadow_page(vgpu); + if (!dmlr) + return; + + intel_vgpu_reset_ggtt(vgpu); + + /* clear scratch page for security */ + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { + if (vgpu->gtt.scratch_pt[i].page != NULL) + memset(page_address(vgpu->gtt.scratch_pt[i].page), + 0, PAGE_SIZE); + } +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index d250013bc37b..f88eb5e89bea 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -81,6 +81,9 @@ struct intel_gvt_gtt { struct list_head oos_page_use_list_head; struct list_head oos_page_free_list_head; struct list_head mm_lru_list_head; + + struct page *scratch_ggtt_page; + unsigned long scratch_ggtt_mfn; }; enum { @@ -202,8 +205,10 @@ struct intel_vgpu_gtt { extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); +extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 398877c3d2fd..e6bf5c533fbe 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); intel_gvt_clean_vgpu_types(gvt); + idr_destroy(&gvt->vgpu_idr); + kfree(dev_priv->gvt); dev_priv->gvt = NULL; } @@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) gvt_dbg_core("init gvt device\n"); + idr_init(&gvt->vgpu_idr); + mutex_init(&gvt->lock); gvt->dev_priv = dev_priv; @@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) ret = intel_gvt_setup_mmio_info(gvt); if (ret) - return ret; + goto out_clean_idr; ret = intel_gvt_load_firmware(gvt); if (ret) @@ -313,6 +317,8 @@ out_free_firmware: intel_gvt_free_firmware(gvt); out_clean_mmio_info: intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); kfree(gvt); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index ad0e9364ee70..e227caf5859e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -175,6 +175,7 @@ struct intel_vgpu { struct notifier_block group_notifier; struct kvm *kvm; struct work_struct release_work; + atomic_t released; } vdev; #endif }; @@ -322,6 +323,7 @@ struct intel_vgpu_creation_params { int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, struct intel_vgpu_creation_params *param); +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); void intel_vgpu_free_resource(struct intel_vgpu *vgpu); void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value); @@ -374,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_type *type); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); @@ -410,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, int 
intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, unsigned long *g_index); +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary); +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); + int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); @@ -423,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); -int setup_vgpu_mmio(struct intel_vgpu *vgpu); void populate_pvinfo_page(struct intel_vgpu *vgpu); struct intel_gvt_ops { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 522809710312..ab2ea157da4c 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, static int new_mmio_info(struct intel_gvt *gvt, u32 offset, u32 flags, u32 size, u32 addr_mask, u32 ro_mask, u32 device, - void *read, void *write) + int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), + int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) { struct intel_gvt_mmio_info *info, *p; u32 start, end, i; @@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, default: /*should not hit here*/ gvt_err("invalid forcewake offset 0x%x\n", offset); - return 1; + return -EINVAL; } } else { ack_reg_offset = FORCEWAKE_ACK_HSW_REG; @@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, return 0; } -static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes, unsigned long bitmap) -{ - struct intel_gvt_workload_scheduler *scheduler = - &vgpu->gvt->scheduler; - - vgpu->resetting = true; - - intel_vgpu_stop_schedule(vgpu); - /* - * The current_vgpu will set to NULL after stopping the - * scheduler when the reset is triggered by current vgpu. 
- */ - if (scheduler->current_vgpu == NULL) { - mutex_unlock(&vgpu->gvt->lock); - intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&vgpu->gvt->lock); - } - - intel_vgpu_reset_execlist(vgpu, bitmap); - - /* full GPU reset */ - if (bitmap == 0xff) { - mutex_unlock(&vgpu->gvt->lock); - intel_vgpu_clean_gtt(vgpu); - mutex_lock(&vgpu->gvt->lock); - setup_vgpu_mmio(vgpu); - populate_pvinfo_page(vgpu); - intel_vgpu_init_gtt(vgpu); - } - - vgpu->resetting = false; - - return 0; -} - static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes) + void *p_data, unsigned int bytes) { + unsigned int engine_mask = 0; u32 data; - u64 bitmap = 0; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (data & GEN6_GRDOM_FULL) { gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); - bitmap = 0xff; - } - if (data & GEN6_GRDOM_RENDER) { - gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); - bitmap |= (1 << RCS); - } - if (data & GEN6_GRDOM_MEDIA) { - gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); - bitmap |= (1 << VCS); - } - if (data & GEN6_GRDOM_BLT) { - gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); - bitmap |= (1 << BCS); - } - if (data & GEN6_GRDOM_VECS) { - gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); - bitmap |= (1 << VECS); - } - if (data & GEN8_GRDOM_MEDIA2) { - gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); - if (HAS_BSD2(vgpu->gvt->dev_priv)) - bitmap |= (1 << VCS2); + engine_mask = ALL_ENGINES; + } else { + if (data & GEN6_GRDOM_RENDER) { + gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); + engine_mask |= (1 << RCS); + } + if (data & GEN6_GRDOM_MEDIA) { + gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); + engine_mask |= (1 << VCS); + } + if (data & GEN6_GRDOM_BLT) { + gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); + engine_mask |= (1 << BCS); + } + if (data & GEN6_GRDOM_VECS) { + gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); + engine_mask |= (1 << VECS); + } + if (data & GEN8_GRDOM_MEDIA2) { + gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); + if (HAS_BSD2(vgpu->gvt->dev_priv)) + engine_mask |= (1 << VCS2); + } } - return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); + + intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); + + return 0; } static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, @@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, +static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; @@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - int rc = 0; unsigned int id = 0; write_vreg(vgpu, offset, p_data, bytes); @@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, id = VECS; break; default: - rc = -EINVAL; - break; + return -EINVAL; } set_bit(id, (void *)vgpu->tlb_handle_pending); - return rc; + return 0; } static int ring_reset_ctl_write(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 4dd6722a7339..3f656e3a6e5a 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -114,12 +114,15 @@ out: static kvm_pfn_t 
gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) { struct gvt_dma *entry; + kvm_pfn_t pfn; mutex_lock(&vgpu->vdev.cache_lock); + entry = __gvt_cache_find(vgpu, gfn); - mutex_unlock(&vgpu->vdev.cache_lock); + pfn = (entry == NULL) ? 0 : entry->pfn; - return entry == NULL ? 0 : entry->pfn; + mutex_unlock(&vgpu->vdev.cache_lock); + return pfn; } static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) @@ -166,7 +169,7 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) { - struct device *dev = &vgpu->vdev.mdev->dev; + struct device *dev = mdev_dev(vgpu->vdev.mdev); struct gvt_dma *this; unsigned long g1; int rc; @@ -195,7 +198,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) { struct gvt_dma *dma; struct rb_node *node = NULL; - struct device *dev = &vgpu->vdev.mdev->dev; + struct device *dev = mdev_dev(vgpu->vdev.mdev); unsigned long gfn; mutex_lock(&vgpu->vdev.cache_lock); @@ -227,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, return NULL; } -static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, - char *buf) +static ssize_t available_instances_show(struct kobject *kobj, + struct device *dev, char *buf) { struct intel_vgpu_type *type; unsigned int num = 0; @@ -266,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, type->fence); } -static MDEV_TYPE_ATTR_RO(available_instance); +static MDEV_TYPE_ATTR_RO(available_instances); static MDEV_TYPE_ATTR_RO(device_api); static MDEV_TYPE_ATTR_RO(description); static struct attribute *type_attrs[] = { - &mdev_type_attr_available_instance.attr, + &mdev_type_attr_available_instances.attr, &mdev_type_attr_device_api.attr, &mdev_type_attr_description.attr, NULL, @@ -395,21 +398,24 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) struct intel_vgpu_type *type; struct device *pdev; void *gvt; + int ret; - pdev = mdev->parent->dev; + pdev = mdev_parent_dev(mdev); gvt = kdev_to_i915(pdev)->gvt; type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); if (!type) { gvt_err("failed to find type %s to create\n", kobject_name(kobj)); - return -EINVAL; + ret = -EINVAL; + goto out; } vgpu = intel_gvt_ops->vgpu_create(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { - gvt_err("create intel vgpu failed\n"); - return -EINVAL; + ret = vgpu == NULL ? 
-EFAULT : PTR_ERR(vgpu); + gvt_err("failed to create intel vgpu: %d\n", ret); + goto out; } INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); @@ -418,8 +424,11 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) mdev_set_drvdata(mdev, vgpu); gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", - dev_name(&mdev->dev)); - return 0; + dev_name(mdev_dev(mdev))); + ret = 0; + +out: + return ret; } static int intel_vgpu_remove(struct mdev_device *mdev) @@ -482,7 +491,7 @@ static int intel_vgpu_open(struct mdev_device *mdev) vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; - ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events, + ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, &vgpu->vdev.iommu_notifier); if (ret != 0) { gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); @@ -490,17 +499,26 @@ static int intel_vgpu_open(struct mdev_device *mdev) } events = VFIO_GROUP_NOTIFY_SET_KVM; - ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events, + ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, &vgpu->vdev.group_notifier); if (ret != 0) { gvt_err("vfio_register_notifier for group failed: %d\n", ret); goto undo_iommu; } - return kvmgt_guest_init(mdev); + ret = kvmgt_guest_init(mdev); + if (ret) + goto undo_group; + + atomic_set(&vgpu->vdev.released, 0); + return ret; + +undo_group: + vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + &vgpu->vdev.group_notifier); undo_iommu: - vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, + vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &vgpu->vdev.iommu_notifier); out: return ret; @@ -509,17 +527,26 @@ out: static void __intel_vgpu_release(struct intel_vgpu *vgpu) { struct kvmgt_guest_info *info; + int ret; if (!handle_valid(vgpu->handle)) return; - vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY, + if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) + return; + + ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, &vgpu->vdev.iommu_notifier); - vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY, + WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); + + ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY, &vgpu->vdev.group_notifier); + WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret); info = (struct kvmgt_guest_info *)vgpu->handle; kvmgt_guest_exit(info); + + vgpu->vdev.kvm = NULL; vgpu->handle = 0; } @@ -534,6 +561,7 @@ static void intel_vgpu_release_work(struct work_struct *work) { struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, vdev.release_work); + __intel_vgpu_release(vgpu); } @@ -1089,7 +1117,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, return 0; } -static const struct parent_ops intel_vgpu_ops = { +static const struct mdev_parent_ops intel_vgpu_ops = { .supported_type_groups = intel_vgpu_type_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1134,6 +1162,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn) idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); + if (!slot) { + srcu_read_unlock(&kvm->srcu, idx); + return -EINVAL; + } spin_lock(&kvm->mmu_lock); @@ -1164,6 +1196,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn) idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); + if (!slot) { + 
srcu_read_unlock(&kvm->srcu, idx); + return -EINVAL; + } spin_lock(&kvm->mmu_lock); @@ -1311,18 +1347,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) { - struct intel_vgpu *vgpu; - if (!info) { gvt_err("kvmgt_guest_info invalid\n"); return false; } - vgpu = info->vgpu; - kvm_page_track_unregister_notifier(info->kvm, &info->track_node); kvmgt_protect_table_destroy(info); - gvt_cache_destroy(vgpu); + gvt_cache_destroy(info->vgpu); vfree(info); return true; @@ -1372,7 +1404,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) return pfn; pfn = INTEL_GVT_INVALID_ADDR; - dev = &info->vgpu->vdev.mdev->dev; + dev = mdev_dev(info->vgpu->vdev.mdev); rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); if (rc != 1) { gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 09c9450a1946..4df078bc5d04 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) goto err; - mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); - if (!mmio && !vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", - vgpu->id, offset, bytes, *(u32 *)p_data); - - if (offset == 0x206c) { - gvt_err("------------------------------------------\n"); - gvt_err("vgpu%d: likely triggers a gfx reset\n", - vgpu->id); - gvt_err("------------------------------------------\n"); - vgpu->mmio.disable_warn_untrack = true; - } - } - if (!intel_gvt_mmio_is_unalign(gvt, offset)) { if (WARN_ON(!IS_ALIGNED(offset, bytes))) goto err; } + mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); if (mmio) { if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) @@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, goto err; } ret = mmio->read(vgpu, offset, p_data, bytes); - } else + } else { ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); + if (!vgpu->mmio.disable_warn_untrack) { + gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", + vgpu->id, offset, bytes, *(u32 *)p_data); + + if (offset == 0x206c) { + gvt_err("------------------------------------------\n"); + gvt_err("vgpu%d: likely triggers a gfx reset\n", + vgpu->id); + gvt_err("------------------------------------------\n"); + vgpu->mmio.disable_warn_untrack = true; + } + } + } + if (ret) goto err; @@ -302,3 +303,56 @@ err: mutex_unlock(&gvt->lock); return ret; } + + +/** + * intel_vgpu_reset_mmio - reset virtual MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + + memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); + memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); + + vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; + + /* set the bit 0:2(Core C-State ) to C0 */ + vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; +} + +/** + * intel_vgpu_init_mmio - init MMIO space + * @vgpu: a vGPU + * + * Returns: + * Zero on success, negative error code if failed + */ +int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + + vgpu->mmio.vreg = 
vzalloc(info->mmio_size * 2); + if (!vgpu->mmio.vreg) + return -ENOMEM; + + vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; + + intel_vgpu_reset_mmio(vgpu); + + return 0; +} + +/** + * intel_vgpu_clean_mmio - clean MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) +{ + vfree(vgpu->mmio.vreg); + vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; +} diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 87d5b5e366a3..3bc620f56f35 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, *offset; \ }) +int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); + int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index d2a0fbc896c3..d9fb41ab7119 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) vgpu->id)) return -EINVAL; - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | - GFP_DMA32 | __GFP_ZERO, - INTEL_GVT_OPREGION_PORDER); + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | + __GFP_ZERO, + get_order(INTEL_GVT_OPREGION_SIZE)); if (!vgpu_opregion(vgpu)->va) return -ENOMEM; @@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) int i, ret; for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { - mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu) + mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va + i * PAGE_SIZE); if (mfn == INTEL_GVT_INVALID_ADDR) { gvt_err("fail to get MFN from VA\n"); @@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { map_vgpu_opregion(vgpu, false); free_pages((unsigned long)vgpu_opregion(vgpu)->va, - INTEL_GVT_OPREGION_PORDER); + get_order(INTEL_GVT_OPREGION_SIZE)); vgpu_opregion(vgpu)->va = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 0dfe789d8f02..fbd023a16f18 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -50,8 +50,7 @@ #define INTEL_GVT_OPREGION_PARM 0x204 #define INTEL_GVT_OPREGION_PAGES 2 -#define INTEL_GVT_OPREGION_PORDER 1 -#define INTEL_GVT_OPREGION_SIZE (2 * 4096) +#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4db242250235..e91885dffeff 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload; + struct intel_vgpu *vgpu; int event; mutex_lock(&gvt->lock); workload = scheduler->current_workload[ring_id]; + vgpu = workload->vgpu; - if (!workload->status && !workload->vgpu->resetting) { + if (!workload->status && !vgpu->resetting) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -364,8 +366,7 
@@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) - intel_vgpu_trigger_virtual_event(workload->vgpu, - event); + intel_vgpu_trigger_virtual_event(vgpu, event); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; - atomic_dec(&workload->vgpu->running_workload_num); - list_del_init(&workload->list); workload->complete(workload); + atomic_dec(&vgpu->running_workload_num); wake_up(&scheduler->workload_complete_wq); mutex_unlock(&gvt->lock); } @@ -459,11 +459,11 @@ complete: gvt_dbg_sched("will complete workload %p\n, status: %d\n", workload, workload->status); - complete_current_workload(gvt, ring_id); - if (workload->req) i915_gem_request_put(fetch_and_zero(&workload->req)); + complete_current_workload(gvt, ring_id); + if (need_force_wake) intel_uncore_forcewake_put(gvt->dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3b30c28bff51..2833dfa8c9ae 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -113,7 +113,7 @@ struct intel_shadow_bb_entry { struct drm_i915_gem_object *obj; void *va; unsigned long len; - void *bb_start_cmd_va; + u32 *bb_start_cmd_va; }; #define workload_q_head(vgpu, ring_id) \ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 536d2b9d5777..7295bc8e12fb 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -35,79 +35,6 @@ #include "gvt.h" #include "i915_pvinfo.h" -static void clean_vgpu_mmio(struct intel_vgpu *vgpu) -{ - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; -} - -int setup_vgpu_mmio(struct intel_vgpu *vgpu) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - - if (vgpu->mmio.vreg) - memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); - else { - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); - if (!vgpu->mmio.vreg) - return -ENOMEM; - } - - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; - - memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); - memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); - - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; - - /* set the bit 0:2(Core C-State ) to C0 */ - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; - return 0; -} - -static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, - struct intel_vgpu_creation_params *param) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - u16 *gmch_ctl; - int i; - - memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, - info->cfg_space_size); - - if (!param->primary) { - vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - } - - /* Show guest that there isn't any stolen memory.*/ - gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); - *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); - - intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, - gvt_aperture_pa_base(gvt), true); - - vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO - | PCI_COMMAND_MEMORY - | PCI_COMMAND_MASTER); - /* - * Clear the bar upper 32bit and let guest to assign the new value - */ - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 
0, 4); - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); - memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); - - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { - vgpu->cfg_space.bar[i].size = pci_resource_len( - gvt->dev_priv->drm.pdev, i * 2); - vgpu->cfg_space.bar[i].tracked = false; - } -} - void populate_pvinfo_page(struct intel_vgpu *vgpu) { /* setup the ballooning information */ @@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) if (low_avail / min_low == 0) break; gvt->types[i].low_gm_size = min_low; - gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; + gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); gvt->types[i].fence = 4; gvt->types[i].max_instance = low_avail / min_low; gvt->types[i].avail_instance = gvt->types[i].max_instance; @@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - gvt->gm.vgpu_allocated_low_gm_size; - high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - + high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - gvt->gm.vgpu_allocated_high_gm_size; fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - gvt->fence.vgpu_allocated_fence_num; @@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); vfree(vgpu); intel_gvt_update_vgpu_types(gvt); @@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, vgpu->gvt = gvt; bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); - setup_vgpu_cfg_space(vgpu, param); + intel_vgpu_init_cfg_space(vgpu, param->primary); - ret = setup_vgpu_mmio(vgpu); + ret = intel_vgpu_init_mmio(vgpu); if (ret) - goto out_free_vgpu; + goto out_clean_idr; ret = intel_vgpu_alloc_resource(vgpu, param); if (ret) @@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu: out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); +out_clean_idr: + idr_remove(&gvt->vgpu_idr, vgpu->id); out_free_vgpu: vfree(vgpu); mutex_unlock(&gvt->lock); @@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, } /** - * intel_gvt_reset_vgpu - reset a virtual GPU + * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset + * @vgpu: virtual GPU + * @dmlr: vGPU Device Model Level Reset or GT Reset + * @engine_mask: engines to reset for GT reset + * + * This function is called when a user wants to reset a virtual GPU through + * device model reset or GT reset. The caller should hold the gvt lock. + * + * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset + * the whole vGPU to its default state, as when it is created. This vGPU function + * is required both for functionality and security concerns. The ultimate goal + * of vGPU FLR is to allow a vGPU instance to be reused by virtual machines. When we + * assign a vGPU to a virtual machine we must issue such a reset first. + * + * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines + * (Render, Blitter, Video, Video Enhancement), as defined by the GPU Spec. + * Unlike the FLR, a GT reset only resets particular resources of a vGPU per + * the reset request. The guest driver can issue a GT reset by programming the + * virtual GDRST register to reset specific virtual GPU engines or all + * engines. 
+ * + * The parameter dmlr identifies whether we will do a DMLR or a GT reset. + * The parameter engine_mask specifies the engines that need to be + * reset. If the value ALL_ENGINES is given for engine_mask, it means + * the caller requests a full GT reset in which we reset all virtual + * GPU engines. For FLR, engine_mask is ignored. + */ +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask) +{ + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + + gvt_dbg_core("------------------------------------------\n"); + gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", + vgpu->id, dmlr, engine_mask); + vgpu->resetting = true; + + intel_vgpu_stop_schedule(vgpu); + /* + * The current_vgpu will be set to NULL after stopping the + * scheduler when the reset is triggered by the current vgpu. + */ + if (scheduler->current_vgpu == NULL) { + mutex_unlock(&gvt->lock); + intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&gvt->lock); + } + + intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); + + /* full GPU reset or device model level reset */ + if (engine_mask == ALL_ENGINES || dmlr) { + intel_vgpu_reset_gtt(vgpu, dmlr); + intel_vgpu_reset_resource(vgpu); + intel_vgpu_reset_mmio(vgpu); + populate_pvinfo_page(vgpu); + + if (dmlr) + intel_vgpu_reset_cfg_space(vgpu); + } + + vgpu->resetting = false; + gvt_dbg_core("reset vgpu%d done\n", vgpu->id); + gvt_dbg_core("------------------------------------------\n"); +} + +/** + * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) * @vgpu: virtual GPU * * This function is called when a user wants to reset a virtual GPU. @@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->lock); + intel_gvt_reset_vgpu_locked(vgpu, true, 0); + mutex_unlock(&vgpu->gvt->lock); }
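A small but easy-to-miss correctness fix sits in the cfg_space.c hunk above: the write bounds check changed from `offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ` to `>`, because a write spans [offset, offset + bytes), so a sum exactly equal to the space size still lands inside it. A self-contained sketch of the corrected predicate (the 4096-byte size and the helper name are assumptions for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define CFG_SPACE_SZ 4096	/* illustrative; the driver uses INTEL_GVT_MAX_CFG_SPACE_SZ */

static bool cfg_write_in_bounds(size_t offset, size_t bytes)
{
	/* A write covers [offset, offset + bytes), so a sum equal to the
	 * space size is still in bounds; only a sum beyond it is not. */
	return bytes <= 4 && offset + bytes <= CFG_SPACE_SZ;
}

int main(void)
{
	assert(cfg_write_in_bounds(CFG_SPACE_SZ - 4, 4));	/* last dword is legal */
	assert(!cfg_write_in_bounds(CFG_SPACE_SZ - 2, 4));	/* crossing the end is not */
	return 0;
}

With the old `>=` form, the first assertion's case (a 4-byte write ending exactly at the boundary) was wrongly rejected with -EINVAL.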