author    | Sean Christopherson <sean.j.christopherson@intel.com> | 2020-03-03 05:02:34 +0300
committer | Paolo Bonzini <pbonzini@redhat.com>                   | 2020-03-16 19:57:42 +0300
commit    | a102a674e423f41eed3bcdfe887287b7a68fb735 (patch)
tree      | 613a5bd0e6ddced936b7fa97c91a225ea69d4c0e /arch/x86/kvm/mmu
parent    | e743664bea8e04d2735b958fe912e7cce3ab350f (diff)
download  | linux-a102a674e423f41eed3bcdfe887287b7a68fb735.tar.xz
KVM: x86/mmu: Don't drop level/direct from MMU role calculation
Use the calculated role as-is when propagating it to kvm_mmu.mmu_role,
i.e. stop masking off meaningful fields. The concept of masking off
fields came from kvm_mmu_pte_write(), which (correctly) ignores certain
fields when comparing kvm_mmu_page.role against kvm_mmu.mmu_role, e.g.
the current mmu's access and level have no relation to a shadow page's
access and level.
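As a minimal illustration of that comparison, here is a self-contained sketch; the "role" union, its field names, and its widths below are simplified stand-ins, not the kernel's actual union kvm_mmu_page_role definition. It shows why a raw word comparison between a shadow page's role and the current MMU's role spuriously fails on fields such as level and access, and why the pre-patch code masked the comparison down to the fields that must agree:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's union kvm_mmu_page_role. */
    union role {
            struct {
                    uint32_t level  : 4;  /* page table level              */
                    uint32_t access : 3;  /* gpte access bits              */
                    uint32_t cr0_wp : 1;  /* example "meaningful" mode bit */
                    uint32_t nxe    : 1;
            };
            uint32_t word;
    };

    int main(void)
    {
            /* Current MMU: its level tracks the root of the paging structure. */
            union role mmu = { .level = 4, .access = 7, .cr0_wp = 1, .nxe = 1 };
            /* A leaf shadow page in the same paging mode. */
            union role sp  = { .level = 1, .access = 3, .cr0_wp = 1, .nxe = 1 };

            /* Raw comparison spuriously fails: level/access legitimately differ. */
            printf("raw match:    %d\n", sp.word == mmu.word);

            /* Pre-patch style, opt-in: compare only the listed fields. */
            union role check = { .cr0_wp = 1, .nxe = 1 };
            printf("masked match: %d\n", !((sp.word ^ mmu.word) & check.word));
            return 0;
    }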
Masking off the level causes problems for 5-level paging, e.g. CR4.LA57
has its own redundant flag in the extended role, and nested EPT would
need a similar hack to support 5-level paging for L2.
Opportunistically rework the mask for kvm_mmu_pte_write() to define the
fields that should be ignored as opposed to the fields that should be
checked, i.e. make it opt-out instead of opt-in so that new fields are
automatically picked up. While doing so, stop ignoring "direct". The
field is effectively ignored anyway because kvm_mmu_pte_write() is only
reached with an indirect mmu and the loop only walks indirect shadow
pages, but double checking "direct" literally costs nothing.
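To make the opt-in vs. opt-out distinction concrete, a hedged sketch (again using a simplified, made-up role layout rather than the kernel's real one): with an opt-out mask, a field such as "direct", or any field added later, is compared by default, whereas an opt-in mask silently skips it until someone remembers to extend the mask.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in role; "direct" stands for a field that the old
     * opt-in mask never listed. */
    union role {
            struct {
                    uint32_t level  : 4;
                    uint32_t access : 3;
                    uint32_t cr0_wp : 1;
                    uint32_t direct : 1;
            };
            uint32_t word;
    };

    int main(void)
    {
            union role sp  = { .level = 1, .access = 3, .cr0_wp = 1, .direct = 1 };
            union role mmu = { .level = 4, .access = 7, .cr0_wp = 1, .direct = 0 };

            /* Opt-in: only the listed fields are checked, so the "direct"
             * mismatch is silently ignored. */
            union role opt_in = { .cr0_wp = 1 };
            printf("opt-in  match: %d\n", !((sp.word ^ mmu.word) & opt_in.word));

            /* Opt-out: only the listed fields are ignored; "direct" (and any
             * future field) is checked by default, so the mismatch is caught. */
            union role opt_out = { .level = 0xf, .access = 0x7 };
            printf("opt-out match: %d\n", !((sp.word ^ mmu.word) & ~opt_out.word));
            return 0;
    }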
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r-- | arch/x86/kvm/mmu/mmu.c | 35
1 file changed, 18 insertions, 17 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index db0b66e153a2..8b22fc0f0973 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -215,17 +215,6 @@ struct kvm_shadow_walk_iterator {
 	unsigned index;
 };
 
-static const union kvm_mmu_page_role mmu_base_role_mask = {
-	.cr0_wp = 1,
-	.gpte_is_8_bytes = 1,
-	.nxe = 1,
-	.smep_andnot_wp = 1,
-	.smap_andnot_wp = 1,
-	.smm = 1,
-	.guest_mode = 1,
-	.ad_disabled = 1,
-};
-
 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),               \
 					 (_root), (_addr));                  \
@@ -4930,7 +4919,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	union kvm_mmu_role new_role =
 		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
 
-	new_role.base.word &= mmu_base_role_mask.word;
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
 
@@ -5002,7 +4990,6 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	union kvm_mmu_role new_role =
 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
 
-	new_role.base.word &= mmu_base_role_mask.word;
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
 
@@ -5059,7 +5046,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
 	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
 
-	new_role.base.word &= mmu_base_role_mask.word;
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
 
@@ -5100,7 +5086,6 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
-	new_role.base.word &= mmu_base_role_mask.word;
 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
 		return;
 
@@ -5339,6 +5324,22 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
 	return spte;
 }
 
+/*
+ * Ignore various flags when determining if a SPTE can be immediately
+ * overwritten for the current MMU.
+ *  - level: explicitly checked in mmu_pte_write_new_pte(), and will never
+ *    match the current MMU role, as MMU's level tracks the root level.
+ *  - access: updated based on the new guest PTE
+ *  - quadrant: handled by get_written_sptes()
+ *  - invalid: always false (loop only walks valid shadow pages)
+ */
+static const union kvm_mmu_page_role role_ign = {
+	.level = 0xf,
+	.access = 0x7,
+	.quadrant = 0x3,
+	.invalid = 0x1,
+};
+
 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			      const u8 *new, int bytes,
 			      struct kvm_page_track_notifier_node *node)
@@ -5394,8 +5395,8 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		entry = *spte;
 		mmu_page_zap_pte(vcpu->kvm, sp, spte);
 		if (gentry &&
-		    !((sp->role.word ^ base_role)
-		      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
+		    !((sp->role.word ^ base_role) & ~role_ign.word) &&
+		    rmap_can_add(vcpu))
 			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 		if (need_remote_flush(entry, *spte))
 			remote_flush = true;