Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kvm/mmu/mmu.c | 49
1 file changed, 33 insertions, 16 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 60103fd07bd2..ef4a63af8fce 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2996,20 +2996,15 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
 #define SET_SPTE_SPURIOUS		BIT(2)
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-		    unsigned int pte_access, int level,
-		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
-		    bool can_unsync, bool host_writable)
+static int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+		     gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
+		     bool can_unsync, bool host_writable, bool ad_disabled,
+		     u64 *new_spte)
 {
 	u64 spte = 0;
 	int ret = 0;
-	struct kvm_mmu_page *sp;
 
-	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
-		return 0;
-
-	sp = sptep_to_sp(sptep);
-	if (sp_ad_disabled(sp))
+	if (ad_disabled)
 		spte |= SPTE_AD_DISABLED_MASK;
 	else if (kvm_vcpu_ad_need_write_protect(vcpu))
 		spte |= SPTE_AD_WRPROT_ONLY_MASK;
@@ -3062,8 +3057,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		 * is responsibility of mmu_get_page / kvm_sync_page.
 		 * Same reasoning can be applied to dirty page accounting.
 		 */
-		if (!can_unsync && is_writable_pte(*sptep))
-			goto set_pte;
+		if (!can_unsync && is_writable_pte(old_spte))
+			goto out;
 
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
 			pgprintk("%s: found shadow page for %llx, marking ro\n",
@@ -3074,15 +3069,37 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 
-	if (pte_access & ACC_WRITE_MASK) {
-		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+	if (pte_access & ACC_WRITE_MASK)
 		spte |= spte_shadow_dirty_mask(spte);
-	}
 
 	if (speculative)
 		spte = mark_spte_for_access_track(spte);
 
-set_pte:
+out:
+	*new_spte = spte;
+	return ret;
+}
+
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+		    unsigned int pte_access, int level,
+		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+		    bool can_unsync, bool host_writable)
+{
+	u64 spte;
+	struct kvm_mmu_page *sp;
+	int ret;
+
+	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
+		return 0;
+
+	sp = sptep_to_sp(sptep);
+
+	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
+
+	if (spte & PT_WRITABLE_MASK)
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+
 	if (*sptep == spte)
 		ret |= SET_SPTE_SPURIOUS;
 	else if (mmu_spte_update(sptep, spte))
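
For orientation, below is a minimal userspace sketch of the compute/install split this patch introduces: make_spte() becomes a helper that only computes the new SPTE value from its inputs and hands it back through an out parameter, while set_spte() keeps the side effects (the MMIO short-circuit, dirty-page marking, and installing the value). Everything in the sketch (make_val, set_val, WRITABLE_BIT) is hypothetical and simplified; it is not the kernel implementation.

/*
 * Illustrative sketch only (not kernel code): simplified, hypothetical types
 * modelling the compute/install split shown in the diff above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WRITABLE_BIT (1ull << 1)	/* stand-in for PT_WRITABLE_MASK */

/* "make" step: pure computation, result handed back through *new_val. */
static int make_val(uint64_t old_val, bool writable, uint64_t *new_val)
{
	uint64_t val = 0;

	if (writable)
		val |= WRITABLE_BIT;

	*new_val = val;
	return 0;			/* status flags, as in the patch */
}

/* "set" step: keeps the side effects and installs the computed value. */
static int set_val(uint64_t *slot, bool writable)
{
	uint64_t val;
	int ret = make_val(*slot, writable, &val);

	if (val & WRITABLE_BIT)
		printf("mark page dirty\n");	/* side effect stays in the caller */

	if (*slot != val)
		*slot = val;			/* install only if it changed */

	return ret;
}

int main(void)
{
	uint64_t slot = 0;

	set_val(&slot, true);
	printf("installed: %#llx\n", (unsigned long long)slot);
	return 0;
}

Presumably the point of the split is that the pure computation can be reused by callers that do not have an sptep in hand, with all writes and accounting staying in the installing caller; the diff itself only shows set_spte() being reworked on top of the new make_spte() helper.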