From 753b43c9d1b73337610861a6e644f8df3635d656 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Thu, 16 Feb 2023 23:41:07 +0800
Subject: KVM: x86/mmu: Use 64-bit address to invalidate to fix a subtle bug

FNAME(invlpg)() and kvm_mmu_invalidate_gva() take a gva_t, i.e. unsigned
long, as the type of the address to invalidate. On 32-bit kernels, the
upper 32 bits of the GPA will get dropped when an L2 GPA is invalidated
in the shadowed nested TDP MMU. Convert the address to u64 to fix the
problem.

Reported-by: Sean Christopherson
Signed-off-by: Lai Jiangshan
Link: https://lore.kernel.org/r/20230216154115.710033-2-jiangshanlai@gmail.com
[sean: tweak changelog]
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm_host.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a45de1118a42..214636554675 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -441,7 +441,7 @@ struct kvm_mmu {
 			    struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
-	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
+	void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
 	union kvm_mmu_page_role root_role;
@@ -2044,8 +2044,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
-void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gva_t gva, hpa_t root_hpa);
+void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			     u64 addr, hpa_t root_hpa);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
-- cgit v1.2.3


From c3c6c9fc5d24bcafbbeda2edb521b70f5df052b7 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Thu, 16 Feb 2023 23:41:11 +0800
Subject: KVM: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into mmu.c

Rename mmu->sync_page to mmu->sync_spte and move the code out of
FNAME(sync_page)'s loop body into mmu.c.

No functional change intended.

Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-6-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 4 +- arch/x86/kvm/mmu/mmu.c | 34 +++++++++--- arch/x86/kvm/mmu/paging_tmpl.h | 114 +++++++++++++++++----------------------- 3 files changed, 76 insertions(+), 76 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 214636554675..b7526c15501e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -439,8 +439,8 @@ struct kvm_mmu { gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t gva_or_gpa, u64 access, struct x86_exception *exception); - int (*sync_page)(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp); + int (*sync_spte)(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, int i); void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa); struct kvm_mmu_root_info root; union kvm_cpu_role cpu_role; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7fd824cf0a25..cb01e01e0404 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1934,7 +1934,7 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the * reserved bits checks will be wrong, etc... */ - if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_page || + if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte || (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) return false; @@ -1943,10 +1943,30 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { + int flush = 0; + int i; + if (!kvm_sync_page_check(vcpu, sp)) return -1; - return vcpu->arch.mmu->sync_page(vcpu, sp); + for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { + int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i); + + if (ret < -1) + return -1; + flush |= ret; + } + + /* + * Note, any flush is purely for KVM's correctness, e.g. when dropping + * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier + * unmap or dirty logging event doesn't fail to flush. The guest is + * responsible for flushing the TLB to ensure any changes in protection + * bits are recognized, i.e. until the guest flushes or page faults on + * a relevant address, KVM is architecturally allowed to let vCPUs use + * cached translations with the old protection bits. 
+ */ + return flush; } static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, @@ -4504,7 +4524,7 @@ static void nonpaging_init_context(struct kvm_mmu *context) { context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; - context->sync_page = NULL; + context->sync_spte = NULL; context->invlpg = NULL; } @@ -5095,7 +5115,7 @@ static void paging64_init_context(struct kvm_mmu *context) { context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; - context->sync_page = paging64_sync_page; + context->sync_spte = paging64_sync_spte; context->invlpg = paging64_invlpg; } @@ -5103,7 +5123,7 @@ static void paging32_init_context(struct kvm_mmu *context) { context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; - context->sync_page = paging32_sync_page; + context->sync_spte = paging32_sync_spte; context->invlpg = paging32_invlpg; } @@ -5192,7 +5212,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, context->cpu_role.as_u64 = cpu_role.as_u64; context->root_role.word = root_role.word; context->page_fault = kvm_tdp_page_fault; - context->sync_page = NULL; + context->sync_spte = NULL; context->invlpg = NULL; context->get_guest_pgd = get_cr3; context->get_pdptr = kvm_pdptr_read; @@ -5324,7 +5344,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->page_fault = ept_page_fault; context->gva_to_gpa = ept_gva_to_gpa; - context->sync_page = ept_sync_page; + context->sync_spte = ept_sync_spte; context->invlpg = ept_invlpg; update_permission_bitmask(context, true); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index abb1d325ad30..c07b2e7def37 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -937,87 +937,67 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, * can't change unless all sptes pointing to it are nuked first. 
* * Returns - * < 0: the sp should be zapped - * 0: the sp is synced and no tlb flushing is required - * > 0: the sp is synced and tlb flushing is required + * < 0: failed to sync spte + * 0: the spte is synced and no tlb flushing is required + * > 0: the spte is synced and tlb flushing is required */ -static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i) { - int i; bool host_writable; gpa_t first_pte_gpa; - bool flush = false; - - first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); - - for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { - u64 *sptep, spte; - struct kvm_memory_slot *slot; - unsigned pte_access; - pt_element_t gpte; - gpa_t pte_gpa; - gfn_t gfn; - - if (!sp->spt[i]) - continue; + u64 *sptep, spte; + struct kvm_memory_slot *slot; + unsigned pte_access; + pt_element_t gpte; + gpa_t pte_gpa; + gfn_t gfn; - pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); + if (!sp->spt[i]) + return 0; - if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, - sizeof(pt_element_t))) - return -1; + first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); + pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); - if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { - flush = true; - continue; - } + if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) + return -1; - gfn = gpte_to_gfn(gpte); - pte_access = sp->role.access; - pte_access &= FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) + return 1; - if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access)) - continue; + gfn = gpte_to_gfn(gpte); + pte_access = sp->role.access; + pte_access &= FNAME(gpte_access)(gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); - /* - * Drop the SPTE if the new protections would result in a RWX=0 - * SPTE or if the gfn is changing. The RWX=0 case only affects - * EPT with execute-only support, i.e. EPT without an effective - * "present" bit, as all other paging modes will create a - * read-only SPTE if pte_access is zero. - */ - if ((!pte_access && !shadow_present_mask) || - gfn != kvm_mmu_page_get_gfn(sp, i)) { - drop_spte(vcpu->kvm, &sp->spt[i]); - flush = true; - continue; - } + if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access)) + return 0; - /* Update the shadowed access bits in case they changed. */ - kvm_mmu_page_set_access(sp, i, pte_access); + /* + * Drop the SPTE if the new protections would result in a RWX=0 + * SPTE or if the gfn is changing. The RWX=0 case only affects + * EPT with execute-only support, i.e. EPT without an effective + * "present" bit, as all other paging modes will create a + * read-only SPTE if pte_access is zero. + */ + if ((!pte_access && !shadow_present_mask) || + gfn != kvm_mmu_page_get_gfn(sp, i)) { + drop_spte(vcpu->kvm, &sp->spt[i]); + return 1; + } - sptep = &sp->spt[i]; - spte = *sptep; - host_writable = spte & shadow_host_writable_mask; - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - make_spte(vcpu, sp, slot, pte_access, gfn, - spte_to_pfn(spte), spte, true, false, - host_writable, &spte); + /* Update the shadowed access bits in case they changed. 
 */
+	kvm_mmu_page_set_access(sp, i, pte_access);

-		flush |= mmu_spte_update(sptep, spte);
-	}
+	sptep = &sp->spt[i];
+	spte = *sptep;
+	host_writable = spte & shadow_host_writable_mask;
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	make_spte(vcpu, sp, slot, pte_access, gfn,
+		  spte_to_pfn(spte), spte, true, false,
+		  host_writable, &spte);

-	/*
-	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
-	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
-	 * unmap or dirty logging event doesn't fail to flush. The guest is
-	 * responsible for flushing the TLB to ensure any changes in protection
-	 * bits are recognized, i.e. until the guest flushes or page faults on
-	 * a relevant address, KVM is architecturally allowed to let vCPUs use
-	 * cached translations with the old protection bits.
-	 */
-	return flush;
+	return mmu_spte_update(sptep, spte);
 }

 #undef pt_element_t
-- cgit v1.2.3


From f94db0c8b9fa50296d00e236e2416aea11186e18 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 16 Feb 2023 23:41:13 +0800
Subject: KVM: x86/mmu: Sanity check input to kvm_mmu_free_roots()

Tweak KVM_MMU_ROOTS_ALL to precisely cover all current+previous root
flags, and add a sanity check in kvm_mmu_free_roots() to verify that the
set of roots to free doesn't stray outside KVM_MMU_ROOTS_ALL.

Signed-off-by: Lai Jiangshan
Link: https://lore.kernel.org/r/20230216154115.710033-8-jiangshanlai@gmail.com
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm_host.h | 8 ++++----
 arch/x86/kvm/mmu/mmu.c          | 2 ++
 2 files changed, 6 insertions(+), 4 deletions(-)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b7526c15501e..c23c49e97ce2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -420,6 +420,10 @@ struct kvm_mmu_root_info {
 #define KVM_MMU_NUM_PREV_ROOTS 3

+#define KVM_MMU_ROOT_CURRENT		BIT(0)
+#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
+#define KVM_MMU_ROOTS_ALL		(BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
+
 #define KVM_HAVE_MMU_RWLOCK

 struct kvm_mmu_page;
@@ -1997,10 +2001,6 @@ static inline int __kvm_irq_line_state(unsigned long *irq_state,
 	return !!(*irq_state);
 }

-#define KVM_MMU_ROOT_CURRENT		BIT(0)
-#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
-#define KVM_MMU_ROOTS_ALL		(~0UL)
-
 int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
 void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index cb01e01e0404..ffc18f5a1adf 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3568,6 +3568,8 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 	LIST_HEAD(invalid_list);
 	bool free_active_root;

+	WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
+
 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);

 	/* Before acquiring the MMU lock, see if we need to do any real work. */
-- cgit v1.2.3


From cd42853e9530fb64097cbe83abce10a34e68cdf4 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Thu, 16 Feb 2023 23:41:14 +0800
Subject: kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_addr()

kvm_mmu_invalidate_addr() is currently called with @root_hpa set to
either @mmu->root.hpa, meaning "invalidate the gva for the current root"
(the same meaning as KVM_MMU_ROOT_CURRENT), or INVALID_PAGE, meaning
"invalidate the gva for all roots" (the same meaning as
KVM_MMU_ROOTS_ALL).

Change the argument type of kvm_mmu_invalidate_addr() and use
KVM_MMU_ROOT_XXX instead so that the function can be reused by
kvm_mmu_invpcid_gva() and nested_ept_invalidate_addr() to invalidate the
gva for different sets of roots.

No functional change intended.

Signed-off-by: Lai Jiangshan
Link: https://lore.kernel.org/r/20230216154115.710033-9-jiangshanlai@gmail.com
[sean: massage comment slightly]
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu/mmu.c          | 38 +++++++++++++++++++-------------------
 arch/x86/kvm/x86.c              |  2 +-
 3 files changed, 21 insertions(+), 21 deletions(-)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c23c49e97ce2..9a2c5925875b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2045,7 +2045,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			     u64 addr, hpa_t root_hpa);
+			     u64 addr, unsigned long roots);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ffc18f5a1adf..a9cbd94ea858 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5765,10 +5765,12 @@ emulate:
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			     u64 addr, hpa_t root_hpa)
+			     u64 addr, unsigned long roots)
 {
 	int i;

+	WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
+
 	/* It's actually a GPA for vcpu->arch.guest_mmu. */
 	if (mmu != &vcpu->arch.guest_mmu) {
 		/* INVLPG on a non-canonical address is a NOP according to the SDM. */
@@ -5781,31 +5783,29 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	if (!mmu->invlpg)
 		return;

-	if (root_hpa == INVALID_PAGE) {
+	if (roots & KVM_MMU_ROOT_CURRENT)
 		mmu->invlpg(vcpu, addr, mmu->root.hpa);
-		/*
-		 * INVLPG is required to invalidate any global mappings for the VA,
-		 * irrespective of PCID. Since it would take us roughly similar amount
-		 * of work to determine whether any of the prev_root mappings of the VA
-		 * is marked global, or to just sync it blindly, so we might as well
-		 * just always sync it.
-		 *
-		 * Mappings not reachable via the current cr3 or the prev_roots will be
-		 * synced when switching to that cr3, so nothing needs to be done here
-		 * for them.
-		 */
-		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (VALID_PAGE(mmu->prev_roots[i].hpa))
-				mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa);
-	} else {
-		mmu->invlpg(vcpu, addr, root_hpa);
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+		if ((roots & KVM_MMU_ROOT_PREVIOUS(i)) &&
+		    VALID_PAGE(mmu->prev_roots[i].hpa))
+			mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa);
 	}
 }

 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
+	/*
+	 * INVLPG is required to invalidate any global mappings for the VA,
+	 * irrespective of PCID. Blindly sync all roots as it would take
+	 * roughly the same amount of work/time to determine whether any of the
+	 * previous roots have a global mapping.
+	 *
+	 * Mappings not reachable via the current or previous cached roots will
+	 * be synced when switching to that new cr3, so nothing needs to be
+	 * done here for them.
	 */
+	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0b6b587d7914..e5a94d2228b0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -803,7 +803,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 	if ((fault->error_code & PFERR_PRESENT_MASK) &&
 	    !(fault->error_code & PFERR_RSVD_MASK))
 		kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
-					fault_mmu->root.hpa);
+					KVM_MMU_ROOT_CURRENT);

 	fault_mmu->inject_page_fault(vcpu, fault);
 }
-- cgit v1.2.3


From 9fd4a4e3a3d9fc0306525d95bf3eca693d311406 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Fri, 17 Feb 2023 07:53:19 +0800
Subject: KVM: x86/mmu: Remove FNAME(invlpg) and use FNAME(sync_spte) to update vTLB instead.

In hardware TLB, invalidating TLB entries means the translations are
removed from the TLB.

In KVM shadowed vTLB, the translations (combinations of shadow paging
and hardware TLB) are generally maintained as long as they remain
"clean" when the TLB of an address space (i.e. a PCID or all) is flushed
with the help of write-protections, sp->unsync, and kvm_sync_page(),
where "clean" in this context means that no updates to KVM's SPTEs are
needed.

However, FNAME(invlpg) always zaps/removes the vTLB entry if the shadow
page is unsync, and thus triggers a remote flush even if the original
vTLB entry is clean, i.e. is usable as-is.

Besides this, FNAME(invlpg) is largely a duplicate implementation of
FNAME(sync_spte) to invalidate a vTLB entry.

To address both issues, reuse FNAME(sync_spte) to share the code and
slightly modify the semantics, i.e. keep the vTLB entry if it's "clean"
and avoid a remote TLB flush.

Signed-off-by: Lai Jiangshan
Link: https://lore.kernel.org/r/20230216235321.735214-3-jiangshanlai@gmail.com
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/mmu/mmu.c          | 50 ++++++++++++++++++++++++------------
 arch/x86/kvm/mmu/paging_tmpl.h  | 57 -----------------------------------------
 3 files changed, 33 insertions(+), 75 deletions(-)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9a2c5925875b..991296a2f807 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -445,7 +445,6 @@ struct kvm_mmu {
 			    struct x86_exception *exception);
 	int (*sync_spte)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp, int i);
-	void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
 	union kvm_mmu_page_role root_role;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 32291a84fdc6..5bca99e50d1a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1073,14 +1073,6 @@ static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
 }

-static bool rmap_can_add(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmu_memory_cache *mc;
-
-	mc = &vcpu->arch.mmu_pte_list_desc_cache;
-	return kvm_mmu_memory_cache_nr_free_objects(mc);
-}
-
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
 	struct kvm_memslots *slots;
@@ -4527,7 +4519,6 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->sync_spte = NULL;
-	context->invlpg = NULL;
 }

 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -5118,7 +5109,6
@@ static void paging64_init_context(struct kvm_mmu *context) context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; context->sync_spte = paging64_sync_spte; - context->invlpg = paging64_invlpg; } static void paging32_init_context(struct kvm_mmu *context) @@ -5126,7 +5116,6 @@ static void paging32_init_context(struct kvm_mmu *context) context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; context->sync_spte = paging32_sync_spte; - context->invlpg = paging32_invlpg; } static union kvm_cpu_role @@ -5215,7 +5204,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, context->root_role.word = root_role.word; context->page_fault = kvm_tdp_page_fault; context->sync_spte = NULL; - context->invlpg = NULL; context->get_guest_pgd = get_cr3; context->get_pdptr = kvm_pdptr_read; context->inject_page_fault = kvm_inject_page_fault; @@ -5347,7 +5335,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->page_fault = ept_page_fault; context->gva_to_gpa = ept_gva_to_gpa; context->sync_spte = ept_sync_spte; - context->invlpg = ept_invlpg; update_permission_bitmask(context, true); context->pkru_mask = 0; @@ -5388,7 +5375,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, * L2 page tables are never shadowed, so there is no need to sync * SPTEs. */ - g_context->invlpg = NULL; + g_context->sync_spte = NULL; /* * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using @@ -5764,6 +5751,35 @@ emulate: } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); +static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + u64 addr, hpa_t root_hpa) +{ + struct kvm_shadow_walk_iterator iterator; + + vcpu_clear_mmio_info(vcpu, addr); + + if (!VALID_PAGE(root_hpa)) + return; + + write_lock(&vcpu->kvm->mmu_lock); + for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { + struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep); + + if (sp->unsync) { + int ret = mmu->sync_spte(vcpu, sp, iterator.index); + + if (ret < 0) + mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL); + if (ret) + kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep); + } + + if (!sp->unsync_children) + break; + } + write_unlock(&vcpu->kvm->mmu_lock); +} + void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots) { @@ -5780,15 +5796,15 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, static_call(kvm_x86_flush_tlb_gva)(vcpu, addr); } - if (!mmu->invlpg) + if (!mmu->sync_spte) return; if (roots & KVM_MMU_ROOT_CURRENT) - mmu->invlpg(vcpu, addr, mmu->root.hpa); + __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (roots & KVM_MMU_ROOT_PREVIOUS(i)) - mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa); + __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); } } EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 0b4e3c9a3f05..e9d97f9b2ec3 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -846,63 +846,6 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); } -/* Note, @addr is a GPA when invlpg() invalidates an L2 GPA translation in shadowed TDP */ -static void FNAME(invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa) -{ - struct kvm_shadow_walk_iterator iterator; - struct kvm_mmu_page *sp; - u64 old_spte; - int 
level; - u64 *sptep; - - vcpu_clear_mmio_info(vcpu, addr); - - /* - * No need to check return value here, rmap_can_add() can - * help us to skip pte prefetch later. - */ - mmu_topup_memory_caches(vcpu, true); - - if (!VALID_PAGE(root_hpa)) - return; - - write_lock(&vcpu->kvm->mmu_lock); - for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { - level = iterator.level; - sptep = iterator.sptep; - - sp = sptep_to_sp(sptep); - old_spte = *sptep; - if (is_last_spte(old_spte, level)) { - pt_element_t gpte; - gpa_t pte_gpa; - - if (!sp->unsync) - break; - - pte_gpa = FNAME(get_level1_sp_gpa)(sp); - pte_gpa += spte_index(sptep) * sizeof(pt_element_t); - - mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL); - if (is_shadow_present_pte(old_spte)) - kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep); - - if (!rmap_can_add(vcpu)) - break; - - if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, - sizeof(pt_element_t))) - break; - - FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false); - } - - if (!sp->unsync_children) - break; - } - write_unlock(&vcpu->kvm->mmu_lock); -} - /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t addr, u64 access, -- cgit v1.2.3 From 8a1300ff95185b23baff9c226a001c269108f9ea Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 Apr 2023 17:31:32 -0700 Subject: KVM: x86: Rename Hyper-V remote TLB hooks to match established scheme Rename the Hyper-V hooks for TLB flushing to match the naming scheme used by all the other TLB flushing hooks, e.g. in kvm_x86_ops, vendor code, arch hooks from common code, etc. Reviewed-by: David Matlack Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/20230405003133.419177-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm-x86-ops.h | 4 ++-- arch/x86/include/asm/kvm_host.h | 10 +++++----- arch/x86/kvm/kvm_onhyperv.c | 13 ++++++------- arch/x86/kvm/kvm_onhyperv.h | 5 ++--- arch/x86/kvm/mmu/mmu.c | 12 ++++++------ arch/x86/kvm/svm/svm_onhyperv.h | 5 ++--- arch/x86/kvm/vmx/vmx.c | 5 ++--- 7 files changed, 25 insertions(+), 29 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 8dc345cc6318..430ca22170e0 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -54,8 +54,8 @@ KVM_X86_OP(set_rflags) KVM_X86_OP(get_if_flag) KVM_X86_OP(flush_tlb_all) KVM_X86_OP(flush_tlb_current) -KVM_X86_OP_OPTIONAL(tlb_remote_flush) -KVM_X86_OP_OPTIONAL(tlb_remote_flush_with_range) +KVM_X86_OP_OPTIONAL(flush_remote_tlbs) +KVM_X86_OP_OPTIONAL(flush_remote_tlbs_range) KVM_X86_OP(flush_tlb_gva) KVM_X86_OP(flush_tlb_guest) KVM_X86_OP(vcpu_pre_run) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 991296a2f807..ec22101410ee 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1588,9 +1588,9 @@ struct kvm_x86_ops { void (*flush_tlb_all)(struct kvm_vcpu *vcpu); void (*flush_tlb_current)(struct kvm_vcpu *vcpu); - int (*tlb_remote_flush)(struct kvm *kvm); - int (*tlb_remote_flush_with_range)(struct kvm *kvm, - struct kvm_tlb_range *range); + int (*flush_remote_tlbs)(struct kvm *kvm); + int (*flush_remote_tlbs_range)(struct kvm *kvm, + struct kvm_tlb_range *range); /* * Flush any TLB entries associated with the given GVA. 
@@ -1794,8 +1794,8 @@ void kvm_arch_free_vm(struct kvm *kvm); #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) { - if (kvm_x86_ops.tlb_remote_flush && - !static_call(kvm_x86_tlb_remote_flush)(kvm)) + if (kvm_x86_ops.flush_remote_tlbs && + !static_call(kvm_x86_flush_remote_tlbs)(kvm)) return 0; else return -ENOTSUPP; diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c index 482d6639ef88..2e2d08da8a3f 100644 --- a/arch/x86/kvm/kvm_onhyperv.c +++ b/arch/x86/kvm/kvm_onhyperv.c @@ -29,8 +29,7 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, return hyperv_flush_guest_mapping(root_tdp); } -int hv_remote_flush_tlb_with_range(struct kvm *kvm, - struct kvm_tlb_range *range) +int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) { struct kvm_arch *kvm_arch = &kvm->arch; struct kvm_vcpu *vcpu; @@ -86,19 +85,19 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm, spin_unlock(&kvm_arch->hv_root_tdp_lock); return ret; } -EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range); +EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range); -int hv_remote_flush_tlb(struct kvm *kvm) +int hv_flush_remote_tlbs(struct kvm *kvm) { - return hv_remote_flush_tlb_with_range(kvm, NULL); + return hv_flush_remote_tlbs_range(kvm, NULL); } -EXPORT_SYMBOL_GPL(hv_remote_flush_tlb); +EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp) { struct kvm_arch *kvm_arch = &vcpu->kvm->arch; - if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) { + if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) { spin_lock(&kvm_arch->hv_root_tdp_lock); vcpu->arch.hv_root_tdp = root_tdp; if (root_tdp != kvm_arch->hv_root_tdp) diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h index 287e98ef9df3..55d7fcb84cc1 100644 --- a/arch/x86/kvm/kvm_onhyperv.h +++ b/arch/x86/kvm/kvm_onhyperv.h @@ -7,9 +7,8 @@ #define __ARCH_X86_KVM_KVM_ONHYPERV_H__ #if IS_ENABLED(CONFIG_HYPERV) -int hv_remote_flush_tlb_with_range(struct kvm *kvm, - struct kvm_tlb_range *range); -int hv_remote_flush_tlb(struct kvm *kvm); +int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range); +int hv_flush_remote_tlbs(struct kvm *kvm); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp); #else /* !CONFIG_HYPERV */ static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 4c874d4ec68f..7654be48ff69 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -270,9 +270,9 @@ static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, return mmu->get_guest_pgd(vcpu); } -static inline bool kvm_available_flush_tlb_with_range(void) +static inline bool kvm_available_flush_remote_tlbs_range(void) { - return kvm_x86_ops.tlb_remote_flush_with_range; + return kvm_x86_ops.flush_remote_tlbs_range; } void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, @@ -284,8 +284,8 @@ void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, range.start_gfn = start_gfn; range.pages = nr_pages; - if (kvm_x86_ops.tlb_remote_flush_with_range) - ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, &range); + if (kvm_x86_ops.flush_remote_tlbs_range) + ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, &range); if (ret) kvm_flush_remote_tlbs(kvm); @@ -1498,7 +1498,7 @@ restart: } } - if (need_flush && kvm_available_flush_tlb_with_range()) { + if (need_flush && 
kvm_available_flush_remote_tlbs_range()) { kvm_flush_remote_tlbs_gfn(kvm, gfn, level); return false; } @@ -6623,7 +6623,7 @@ restart: PG_LEVEL_NUM)) { kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); - if (kvm_available_flush_tlb_with_range()) + if (kvm_available_flush_remote_tlbs_range()) kvm_flush_remote_tlbs_sptep(kvm, sptep); else need_tlb_flush = 1; diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h index cff838f15db5..823001033539 100644 --- a/arch/x86/kvm/svm/svm_onhyperv.h +++ b/arch/x86/kvm/svm/svm_onhyperv.h @@ -35,9 +35,8 @@ static inline __init void svm_hv_hardware_setup(void) if (npt_enabled && ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) { pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n"); - svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; - svm_x86_ops.tlb_remote_flush_with_range = - hv_remote_flush_tlb_with_range; + svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs; + svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range; } if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d7bf14abdba1..8031bded75cc 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8432,9 +8432,8 @@ static __init int hardware_setup(void) #if IS_ENABLED(CONFIG_HYPERV) if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH && enable_ept) { - vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; - vmx_x86_ops.tlb_remote_flush_with_range = - hv_remote_flush_tlb_with_range; + vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs; + vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range; } #endif -- cgit v1.2.3 From 9ed3bf411226f446a9795f2b49a15b9df98d7cf5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 Apr 2023 17:31:33 -0700 Subject: KVM: x86/mmu: Move filling of Hyper-V's TLB range struct into Hyper-V code Refactor Hyper-V's range-based TLB flushing API to take a gfn+nr_pages pair instead of a struct, and bury said struct in Hyper-V specific code. Passing along two params generates much better code for the common case where KVM is _not_ running on Hyper-V, as forwarding the flush on to Hyper-V's hv_flush_remote_tlbs_range() from kvm_flush_remote_tlbs_range() becomes a tail call. Cc: David Matlack Reviewed-by: David Matlack Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/20230405003133.419177-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 9 ++------- arch/x86/kvm/kvm_onhyperv.c | 24 ++++++++++++++++++++---- arch/x86/kvm/kvm_onhyperv.h | 2 +- arch/x86/kvm/mmu/mmu.c | 8 ++------ 4 files changed, 25 insertions(+), 18 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ec22101410ee..09eb37853cb1 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -482,11 +482,6 @@ struct kvm_mmu { u64 pdptrs[4]; /* pae */ }; -struct kvm_tlb_range { - u64 start_gfn; - u64 pages; -}; - enum pmc_type { KVM_PMC_GP = 0, KVM_PMC_FIXED, @@ -1589,8 +1584,8 @@ struct kvm_x86_ops { void (*flush_tlb_all)(struct kvm_vcpu *vcpu); void (*flush_tlb_current)(struct kvm_vcpu *vcpu); int (*flush_remote_tlbs)(struct kvm *kvm); - int (*flush_remote_tlbs_range)(struct kvm *kvm, - struct kvm_tlb_range *range); + int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn, + gfn_t nr_pages); /* * Flush any TLB entries associated with the given GVA. 
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c index 2e2d08da8a3f..ded0bd688c65 100644 --- a/arch/x86/kvm/kvm_onhyperv.c +++ b/arch/x86/kvm/kvm_onhyperv.c @@ -10,17 +10,22 @@ #include "hyperv.h" #include "kvm_onhyperv.h" +struct kvm_hv_tlb_range { + u64 start_gfn; + u64 pages; +}; + static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, void *data) { - struct kvm_tlb_range *range = data; + struct kvm_hv_tlb_range *range = data; return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn, range->pages); } static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, - struct kvm_tlb_range *range) + struct kvm_hv_tlb_range *range) { if (range) return hyperv_flush_guest_mapping_range(root_tdp, @@ -29,7 +34,8 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, return hyperv_flush_guest_mapping(root_tdp); } -int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) +static int __hv_flush_remote_tlbs_range(struct kvm *kvm, + struct kvm_hv_tlb_range *range) { struct kvm_arch *kvm_arch = &kvm->arch; struct kvm_vcpu *vcpu; @@ -85,11 +91,21 @@ int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) spin_unlock(&kvm_arch->hv_root_tdp_lock); return ret; } + +int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages) +{ + struct kvm_hv_tlb_range range = { + .start_gfn = start_gfn, + .pages = nr_pages, + }; + + return __hv_flush_remote_tlbs_range(kvm, &range); +} EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range); int hv_flush_remote_tlbs(struct kvm *kvm) { - return hv_flush_remote_tlbs_range(kvm, NULL); + return __hv_flush_remote_tlbs_range(kvm, NULL); } EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs); diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h index 55d7fcb84cc1..ff127d313242 100644 --- a/arch/x86/kvm/kvm_onhyperv.h +++ b/arch/x86/kvm/kvm_onhyperv.h @@ -7,7 +7,7 @@ #define __ARCH_X86_KVM_KVM_ONHYPERV_H__ #if IS_ENABLED(CONFIG_HYPERV) -int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range); +int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages); int hv_flush_remote_tlbs(struct kvm *kvm); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp); #else /* !CONFIG_HYPERV */ diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7654be48ff69..a7adbac0855c 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -278,15 +278,11 @@ static inline bool kvm_available_flush_remote_tlbs_range(void) void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages) { - struct kvm_tlb_range range; int ret = -EOPNOTSUPP; - range.start_gfn = start_gfn; - range.pages = nr_pages; - if (kvm_x86_ops.flush_remote_tlbs_range) - ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, &range); - + ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn, + nr_pages); if (ret) kvm_flush_remote_tlbs(kvm); } -- cgit v1.2.3
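
A quick aside on the KVM_MMU_ROOT_XXX flags used throughout the series: the
standalone sketch below (plain userspace C, not kernel code; the printf demo
is purely illustrative) shows how the bits compose and why tightening
KVM_MMU_ROOTS_ALL from ~0UL to BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1 makes the
WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL) sanity checks possible.

/*
 * Standalone sketch: mirrors the KVM_MMU_ROOT_XXX definitions from
 * kvm_host.h above so the resulting bitmask values can be printed.
 */
#include <stdio.h>

#define BIT(n)				(1UL << (n))
#define KVM_MMU_NUM_PREV_ROOTS		3
#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1 + (i))
#define KVM_MMU_ROOTS_ALL		(BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)

int main(void)
{
	int i;

	printf("KVM_MMU_ROOT_CURRENT     = 0x%lx\n", KVM_MMU_ROOT_CURRENT);
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		printf("KVM_MMU_ROOT_PREVIOUS(%d) = 0x%lx\n", i, KVM_MMU_ROOT_PREVIOUS(i));
	printf("KVM_MMU_ROOTS_ALL        = 0x%lx\n", KVM_MMU_ROOTS_ALL);	/* 0xf */

	/* A stray bit outside the mask is what the new WARN_ON_ONCE() catches. */
	unsigned long bogus = BIT(7);
	printf("bogus & ~KVM_MMU_ROOTS_ALL = 0x%lx (non-zero => WARN)\n",
	       bogus & ~KVM_MMU_ROOTS_ALL);
	return 0;
}

With KVM_MMU_NUM_PREV_ROOTS == 3 the mask is 0xf: bit 0 selects the current
root and bits 1-3 select the three previously cached roots.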
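
Similarly, the flush_remote_tlbs_range() refactor boils down to the fallback
pattern sketched below. This is a simplified userspace illustration with
hypothetical stub names (flush_remote_tlbs_all, hv_flush_range), not the
actual kernel implementation: the optional vendor hook is tried first with a
gfn + nr_pages pair, and a missing hook or an error falls back to a full
remote TLB flush.

#include <stdio.h>
#include <errno.h>

typedef unsigned long long gfn_t;
struct kvm { int dummy; };

/* Optional hook, mirroring kvm_x86_ops.flush_remote_tlbs_range. */
static int (*flush_remote_tlbs_range_hook)(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);

static void flush_remote_tlbs_all(struct kvm *kvm)
{
	(void)kvm;
	printf("full remote TLB flush\n");
}

static void flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
{
	int ret = -EOPNOTSUPP;

	if (flush_remote_tlbs_range_hook)
		ret = flush_remote_tlbs_range_hook(kvm, start_gfn, nr_pages);
	if (ret)
		flush_remote_tlbs_all(kvm);	/* hook absent or failed */
	else
		printf("range flush handled: gfn=%llu pages=%llu\n", start_gfn, nr_pages);
}

/* Stand-in for a vendor range-flush implementation such as Hyper-V's. */
static int hv_flush_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages)
{
	(void)kvm;
	(void)gfn;
	return nr_pages ? 0 : -EINVAL;
}

int main(void)
{
	struct kvm vm = { 0 };

	flush_remote_tlbs_range(&vm, 0x1000, 16);	/* no hook -> full flush */
	flush_remote_tlbs_range_hook = hv_flush_range;
	flush_remote_tlbs_range(&vm, 0x1000, 16);	/* hook -> range flush */
	return 0;
}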