Diffstat (limited to 'drivers/gpu/drm/i915/gvt/gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.c	298
1 file changed, 187 insertions(+), 111 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 64d67ff9bf08..a529d2bd393c 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -94,12 +94,12 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 	u64 h_addr;
 	int ret;
 
-	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
 				       &h_addr);
 	if (ret)
 		return ret;
 
-	*h_index = h_addr >> GTT_PAGE_SHIFT;
+	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
 	return 0;
 }
 
@@ -109,12 +109,12 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 	u64 g_addr;
 	int ret;
 
-	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
 				       &g_addr);
 	if (ret)
 		return ret;
 
-	*g_index = g_addr >> GTT_PAGE_SHIFT;
+	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
 	return 0;
 }
 
@@ -156,13 +156,15 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 
 struct gtt_type_table_entry {
 	int entry_type;
+	int pt_type;
 	int next_pt_type;
 	int pse_entry_type;
 };
 
-#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
 	[type] = { \
 		.entry_type = e_type, \
+		.pt_type = cpt_type, \
 		.next_pt_type = npt_type, \
 		.pse_entry_type = pse_type, \
 	}
@@ -170,55 +172,68 @@ struct gtt_type_table_entry {
 
 static struct gtt_type_table_entry gtt_type_table[] = {
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_PPGTT_PML4_ENTRY,
+			GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
 			GTT_TYPE_PPGTT_PML4_ENTRY,
+			GTT_TYPE_PPGTT_PML4_PT,
 			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
 			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
 			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
 			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_INVALID),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
 			GTT_TYPE_PPGTT_PDE_ENTRY,
+			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
 			GTT_TYPE_PPGTT_PDP_ENTRY,
+			GTT_TYPE_PPGTT_PDP_PT,
 			GTT_TYPE_INVALID,
 			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
 			GTT_TYPE_GGTT_PTE,
 			GTT_TYPE_INVALID,
+			GTT_TYPE_INVALID,
 			GTT_TYPE_INVALID),
 };
@@ -227,6 +242,11 @@ static inline int get_next_pt_type(int type)
 	return gtt_type_table[type].next_pt_type;
 }
 
+static inline int get_pt_type(int type)
+{
+	return gtt_type_table[type].pt_type;
+}
+
 static inline int get_entry_type(int type)
 {
 	return gtt_type_table[type].entry_type;
@@ -351,7 +371,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 		return false;
 
 	e->type = get_entry_type(e->type);
-	if (!(e->val64 & (1 << 7)))
+	if (!(e->val64 & BIT(7)))
 		return false;
 
 	e->type = get_pse_type(e->type);
@@ -369,12 +389,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
 		|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 		return (e->val64 != 0);
 	else
-		return (e->val64 & (1 << 0));
+		return (e->val64 & BIT(0));
 }
 
 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
 {
-	e->val64 &= ~(1 << 0);
+	e->val64 &= ~BIT(0);
+}
+
+static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
+{
+	e->val64 |= BIT(0);
 }
 
 /*
@@ -382,7 +407,7 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
  */
 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
 {
-	unsigned long x = (gma >> GTT_PAGE_SHIFT);
+	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
 
 	trace_gma_index(__func__, gma, x);
 	return x;
@@ -406,6 +431,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 	.get_entry = gtt_get_entry64,
 	.set_entry = gtt_set_entry64,
 	.clear_present = gtt_entry_clear_present,
+	.set_present = gtt_entry_set_present,
 	.test_present = gen8_gtt_test_present,
 	.test_pse = gen8_gtt_test_pse,
 	.get_pfn = gen8_gtt_get_pfn,
@@ -494,7 +520,7 @@ static inline int ppgtt_spt_get_entry(
 		return -EINVAL;
 
 	ret = ops->get_entry(page_table, e, index, guest,
-			spt->guest_page.gfn << GTT_PAGE_SHIFT,
+			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
 			spt->vgpu);
 	if (ret)
 		return ret;
@@ -516,7 +542,7 @@ static inline int ppgtt_spt_set_entry(
 		return -EINVAL;
 
 	return ops->set_entry(page_table, e, index, guest,
-			spt->guest_page.gfn << GTT_PAGE_SHIFT,
+			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
 			spt->vgpu);
 }
 
@@ -537,88 +563,103 @@ static inline int ppgtt_spt_set_entry(
 		spt->shadow_page.type, e, index, false)
 
 /**
- * intel_vgpu_init_guest_page - init a guest page data structure
+ * intel_vgpu_init_page_track - init a page track data structure
  * @vgpu: a vGPU
- * @p: a guest page data structure
+ * @t: a page track data structure
  * @gfn: guest memory page frame number
- * @handler: function will be called when target guest memory page has
+ * @handler: the function will be called when target guest memory page has
  * been modified.
  *
- * This function is called when user wants to track a guest memory page.
+ * This function is called when a user wants to prepare a page track data
+ * structure to track a guest memory page.
  *
 * Returns:
 * Zero on success, negative error code if failed.
 */
-int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p,
+int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t,
 		unsigned long gfn,
 		int (*handler)(void *, u64, void *, int),
 		void *data)
 {
-	INIT_HLIST_NODE(&p->node);
+	INIT_HLIST_NODE(&t->node);
 
-	p->writeprotection = false;
-	p->gfn = gfn;
-	p->handler = handler;
-	p->data = data;
-	p->oos_page = NULL;
-	p->write_cnt = 0;
+	t->tracked = false;
+	t->gfn = gfn;
+	t->handler = handler;
+	t->data = data;
 
-	hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+	hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
 	return 0;
 }
 
-static int detach_oos_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_oos_page *oos_page);
-
 /**
- * intel_vgpu_clean_guest_page - release the resource owned by guest page data
- * structure
+ * intel_vgpu_clean_page_track - release a page track data structure
  * @vgpu: a vGPU
- * @p: a tracked guest page
+ * @t: a page track data structure
  *
- * This function is called when user tries to stop tracking a guest memory
- * page.
+ * This function is called before a user frees a page track data structure.
 */
-void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_guest_page *p)
+void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
+		struct intel_vgpu_page_track *t)
 {
-	if (!hlist_unhashed(&p->node))
-		hash_del(&p->node);
-
-	if (p->oos_page)
-		detach_oos_page(vgpu, p->oos_page);
+	if (!hlist_unhashed(&t->node))
+		hash_del(&t->node);
 
-	if (p->writeprotection)
-		intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+	if (t->tracked)
+		intel_gvt_hypervisor_disable_page_track(vgpu, t);
 }
 
 /**
- * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * intel_vgpu_find_tracked_page - find a tracked guest page
  * @vgpu: a vGPU
  * @gfn: guest memory page frame number
 *
- * This function is called when emulation logic wants to know if a trapped GFN
- * is a tracked guest page.
+ * This function is called when the emulation layer wants to figure out if a
+ * trapped GFN is a tracked guest page.
 *
 * Returns:
- * Pointer to guest page data structure, NULL if failed.
+ * Pointer to page track data structure, NULL if not found.
 */
-struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
 		struct intel_vgpu *vgpu, unsigned long gfn)
 {
-	struct intel_vgpu_guest_page *p;
+	struct intel_vgpu_page_track *t;
 
-	hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
-		p, node, gfn) {
-		if (p->gfn == gfn)
-			return p;
+	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
+		t, node, gfn) {
+		if (t->gfn == gfn)
+			return t;
 	}
 	return NULL;
 }
 
+static int init_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p,
+		unsigned long gfn,
+		int (*handler)(void *, u64, void *, int),
+		void *data)
+{
+	p->oos_page = NULL;
+	p->write_cnt = 0;
+
+	return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page);
+
+static void clean_guest_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_guest_page *p)
+{
+	if (p->oos_page)
+		detach_oos_page(vgpu, p->oos_page);
+
+	intel_vgpu_clean_page_track(vgpu, &p->track);
+}
+
 static inline int init_shadow_page(struct intel_vgpu *vgpu,
-		struct intel_vgpu_shadow_page *p, int type)
+		struct intel_vgpu_shadow_page *p, int type, bool hash)
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	dma_addr_t daddr;
@@ -634,8 +675,9 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
 
 	INIT_HLIST_NODE(&p->node);
 
-	p->mfn = daddr >> GTT_PAGE_SHIFT;
-	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+	p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
+	if (hash)
+		hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
 	return 0;
 }
 
@@ -644,7 +686,7 @@ static inline void clean_shadow_page(struct intel_vgpu *vgpu,
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 
-	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+	dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
 			PCI_DMA_BIDIRECTIONAL);
 
 	if (!hlist_unhashed(&p->node))
@@ -664,6 +706,9 @@ static inline struct intel_vgpu_shadow_page *find_shadow_page(
 	return NULL;
 }
 
+#define page_track_to_guest_page(ptr) \
+	container_of(ptr, struct intel_vgpu_guest_page, track)
+
 #define guest_page_to_ppgtt_spt(ptr) \
 	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
 
@@ -697,7 +742,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
 
 	clean_shadow_page(spt->vgpu, &spt->shadow_page);
-	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+	clean_guest_page(spt->vgpu, &spt->guest_page);
 	list_del_init(&spt->post_shadow_list);
 
 	free_spt(spt);
@@ -713,22 +758,24 @@ static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
 		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
 }
 
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+		struct intel_vgpu_guest_page *gpt,
 		u64 pa, void *p_data, int bytes);
 
-static int ppgtt_write_protection_handler(void *gp, u64 pa,
+static int ppgtt_write_protection_handler(void *data, u64 pa,
 		void *p_data, int bytes)
 {
-	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+	struct intel_vgpu_page_track *t = data;
+	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
 	int ret;
 
 	if (bytes != 4 && bytes != 8)
 		return -EINVAL;
 
-	if (!gpt->writeprotection)
+	if (!t->tracked)
 		return -EINVAL;
 
-	ret = ppgtt_handle_guest_write_page_table_bytes(gp,
+	ret = ppgtt_handle_guest_write_page_table_bytes(p,
 		pa, p_data, bytes);
 	if (ret)
 		return ret;
@@ -762,13 +809,13 @@ retry:
 	 * TODO: guest page type may be different with shadow page type,
 	 * when we support PSE page in future.
 	 */
-	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+	ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
 	if (ret) {
 		gvt_vgpu_err("fail to initialize shadow page for spt\n");
 		goto err;
 	}
 
-	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+	ret = init_guest_page(vgpu, &spt->guest_page,
 			gfn, ppgtt_write_protection_handler, NULL);
 	if (ret) {
 		gvt_vgpu_err("fail to initialize guest page for spt\n");
@@ -798,7 +845,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
 	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
 
 #define pt_entries(spt) \
-	(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
 
 #define for_each_present_guest_entry(spt, e, i) \
 	for (i = 0; i < pt_entries(spt); i++) \
@@ -856,7 +903,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	int v = atomic_read(&spt->refcount);
 
 	trace_spt_change(spt->vgpu->id, "die", spt,
-			spt->guest_page.gfn, spt->shadow_page.type);
+			spt->guest_page.track.gfn, spt->shadow_page.type);
 
 	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
 
@@ -878,7 +925,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	}
 release:
 	trace_spt_change(spt->vgpu->id, "release", spt,
-			spt->guest_page.gfn, spt->shadow_page.type);
+			spt->guest_page.track.gfn, spt->shadow_page.type);
 	ppgtt_free_shadow_page(spt);
 	return 0;
 fail:
@@ -895,6 +942,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s = NULL;
 	struct intel_vgpu_guest_page *g;
+	struct intel_vgpu_page_track *t;
 	int ret;
 
 	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
@@ -902,8 +950,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 		goto fail;
 	}
 
-	g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
-	if (g) {
+	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
+	if (t) {
+		g = page_track_to_guest_page(t);
 		s = guest_page_to_ppgtt_spt(g);
 		ppgtt_get_shadow_page(s);
 	} else {
@@ -915,7 +964,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 			goto fail;
 		}
 
-		ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
+				&s->guest_page.track);
 		if (ret)
 			goto fail;
 
@@ -923,7 +973,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 		if (ret)
 			goto fail;
 
-		trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
 			s->shadow_page.type);
 	}
 	return s;
@@ -953,7 +1003,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
-			spt->guest_page.gfn, spt->shadow_page.type);
+			spt->guest_page.track.gfn, spt->shadow_page.type);
 
 	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 		for_each_present_guest_entry(spt, &ge, i) {
@@ -1078,11 +1128,11 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
 	old.type = new.type = get_entry_type(spt->guest_page_type);
 	old.val64 = new.val64 = 0;
 
-	for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
-		index++) {
+	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
+		info->gtt_entry_size_shift); index++) {
 		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
 		ops->get_entry(NULL, &new, index, true,
-				oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+			oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
 
 		if (old.val64 == new.val64
 			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
@@ -1132,8 +1182,9 @@ static int attach_oos_page(struct intel_vgpu *vgpu,
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ret;
 
-	ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
-		oos_page->mem, GTT_PAGE_SIZE);
+	ret = intel_gvt_hypervisor_read_gpa(vgpu,
+			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
+			oos_page->mem, I915_GTT_PAGE_SIZE);
 	if (ret)
 		return ret;
 
@@ -1152,7 +1203,7 @@ static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
 {
 	int ret;
 
-	ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
 	if (ret)
 		return ret;
 
@@ -1200,7 +1251,7 @@ static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
 		gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
 
 	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
-	return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
 }
 
 /**
@@ -1335,10 +1386,10 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
 	return 0;
 }
 
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+		struct intel_vgpu_guest_page *gpt,
 		u64 pa, void *p_data, int bytes)
 {
-	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
 	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
 	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -1418,7 +1469,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
 		mm->shadow_page_table = mem + mm->page_table_entry_size;
 	} else if (mm->type == INTEL_GVT_MM_GGTT) {
 		mm->page_table_entry_cnt =
-			(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+			(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
 		mm->page_table_entry_size = mm->page_table_entry_cnt *
 			info->gtt_entry_size;
 		mem = vzalloc(mm->page_table_entry_size);
@@ -1740,8 +1791,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 				gma_ops->gma_to_ggtt_pte_index(gma));
 		if (ret)
 			goto err;
-		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
-			+ (gma & ~GTT_PAGE_MASK);
+		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+			+ (gma & ~I915_GTT_PAGE_MASK);
 
 		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
 		return gpa;
@@ -1793,8 +1844,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 		}
 	}
 
-	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
-		+ (gma & ~GTT_PAGE_MASK);
+	gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+		+ (gma & ~I915_GTT_PAGE_MASK);
 
 	trace_gma_translate(vgpu->id, "ppgtt", 0,
 			mm->page_table_level, gma, gpa);
@@ -1862,7 +1913,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	if (bytes != 4 && bytes != 8)
 		return -EINVAL;
 
-	gma = g_gtt_index << GTT_PAGE_SHIFT;
+	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
 
 	/* the VM may configure the whole GM space when ballooning is used */
 	if (!vgpu_gmadr_is_valid(vgpu, gma))
@@ -1881,11 +1932,11 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			 * update the entry in this situation p2m will fail
 			 * settting the shadow entry to point to a scratch page
 			 */
-			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 		}
 	} else {
 		m = e;
-		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1920,12 +1971,45 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+		void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ret = 0;
+
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
+
+		mutex_lock(&gvt->lock);
+
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			if (unlikely(vgpu->failsafe)) {
+				/* remove write protection to prevent furture traps */
+				intel_vgpu_clean_page_track(vgpu, t);
+			} else {
+				ret = t->handler(t, pa, p_data, bytes);
+				if (ret) {
+					gvt_err("guest page write error %d, "
+						"gfn 0x%lx, pa 0x%llx, "
+						"var 0x%x, len %d\n",
+						ret, t->gfn, pa,
+						*(u32 *)p_data, bytes);
+				}
+			}
+		}
+		mutex_unlock(&gvt->lock);
+	}
+	return ret;
+}
+
+
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		intel_gvt_gtt_type_t type)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-	int page_entry_num = GTT_PAGE_SIZE >>
+	int page_entry_num = I915_GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
 	int i;
@@ -1949,7 +2033,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}
 	gtt->scratch_pt[type].page_mfn =
-		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
+		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
 			vgpu->id, type, gtt->scratch_pt[type].page_mfn);
@@ -1992,7 +2076,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
 			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
-					GTT_PAGE_SHIFT);
+					I915_GTT_PAGE_SHIFT);
 			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
@@ -2035,7 +2119,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	struct intel_vgpu_mm *ggtt_mm;
 
-	hash_init(gtt->guest_page_hash_table);
+	hash_init(gtt->tracked_guest_page_hash_table);
 	hash_init(gtt->shadow_page_hash_table);
 
 	INIT_LIST_HEAD(&gtt->mm_list_head);
@@ -2196,7 +2280,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
@@ -2231,7 +2315,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
@@ -2288,15 +2372,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		__free_page(virt_to_page(page));
 		return -ENOMEM;
 	}
-	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
-	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+
+	gvt->gtt.scratch_page = virt_to_page(page);
+	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
 
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
 		if (ret) {
 			gvt_err("fail to initialize SPT oos\n");
 			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
-			__free_page(gvt->gtt.scratch_ggtt_page);
+			__free_page(gvt->gtt.scratch_page);
 			return ret;
 		}
 	}
@@ -2315,12 +2400,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
 	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
-	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
-					GTT_PAGE_SHIFT);
+	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
+					I915_GTT_PAGE_SHIFT);
 
 	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 
-	__free_page(gvt->gtt.scratch_ggtt_page);
+	__free_page(gvt->gtt.scratch_page);
 
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
@@ -2346,7 +2431,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 
 	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
 	e.type = GTT_TYPE_GGTT_PTE;
-	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+	ops->set_pfn(&e, gvt->gtt.scratch_mfn);
 	e.val64 |= _PAGE_PRESENT;
 
 	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
@@ -2372,8 +2457,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 {
-	int i;
-
 	ppgtt_free_all_shadow_page(vgpu);
 
 	/* Shadow pages are only created when there is no page
@@ -2383,11 +2466,4 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
 
 	intel_vgpu_reset_ggtt(vgpu);
-
-	/* clear scratch page for security */
-	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
-		if (vgpu->gtt.scratch_pt[i].page != NULL)
-			memset(page_address(vgpu->gtt.scratch_pt[i].page),
-				0, PAGE_SIZE);
-	}
 }
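
The page-track rename in this diff splits write tracking of a guest page into separate steps: intel_vgpu_init_page_track() registers a struct intel_vgpu_page_track in the vGPU's hash table, intel_gvt_hypervisor_enable_page_track() arms the write protection, and trapped writes are then routed to the stored handler by intel_vgpu_write_protect_handler(). A minimal sketch of that call pattern follows. It is illustrative only: my_write_handler and track_one_page are hypothetical names, and the standalone kzalloc'd track structure stands in for the embedding inside struct intel_vgpu_guest_page that the patch itself uses.

static int my_write_handler(void *data, u64 pa, void *p_data, int bytes)
{
	/* the dispatcher passes the page track structure as first argument */
	struct intel_vgpu_page_track *t = data;

	gvt_dbg_mm("write to tracked gfn 0x%lx, pa 0x%llx, len %d\n",
		   t->gfn, pa, bytes);
	return 0;
}

static int track_one_page(struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *t;
	int ret;

	/* hashed into tracked_guest_page_hash_table, so the structure
	 * must stay alive until intel_vgpu_clean_page_track() removes it
	 */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	ret = intel_vgpu_init_page_track(vgpu, t, gfn, my_write_handler, NULL);
	if (ret)
		goto err_free;

	/* init only registers the page; write protection is armed separately */
	ret = intel_gvt_hypervisor_enable_page_track(vgpu, t);
	if (ret)
		goto err_clean;

	return 0;

err_clean:
	intel_vgpu_clean_page_track(vgpu, t);
err_free:
	kfree(t);
	return ret;
}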
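
The new pt_type column in gtt_type_table records, for every entry type, the page-table type that holds entries of that kind; it complements next_pt_type, which descends one level, and pse_entry_type, which gives the huge-page form. Reading the GTT_TYPE_PPGTT_PDE_ENTRY row of the table above, the three lookup helpers would return:

	/* values taken directly from the GTT_TYPE_PPGTT_PDE_ENTRY row above */
	int cur_pt  = get_pt_type(GTT_TYPE_PPGTT_PDE_ENTRY);      /* GTT_TYPE_PPGTT_PDE_PT */
	int next_pt = get_next_pt_type(GTT_TYPE_PPGTT_PDE_ENTRY); /* GTT_TYPE_PPGTT_PTE_PT */
	int pse     = get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY);     /* GTT_TYPE_PPGTT_PTE_2M_ENTRY */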
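
Both translation paths in intel_vgpu_gma_to_gpa() end with the same arithmetic: the page frame number from the looked-up entry supplies the page-aligned bits, and the low bits of the GMA survive as the in-page offset. A worked example with the 4KB GTT page size (I915_GTT_PAGE_SHIFT == 12), using made-up numbers:

	/* pfn = 0x1234, gma = 0xabc567:
	 *   gpa = (0x1234 << I915_GTT_PAGE_SHIFT) + (0xabc567 & ~I915_GTT_PAGE_MASK)
	 *       = 0x1234000 + 0x567
	 *       = 0x1234567
	 */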