diff options
author | Junaid Shahid <junaids@google.com> | 2018-06-28 00:59:18 +0300 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2018-08-06 18:59:00 +0300 |
commit | 956bf3531fba53c0501eda4fbc67950b0f7b913f (patch) | |
tree | 37e602fdc99567f8e5a702dfdbca00d415327099 /arch/x86/kvm/mmu.c | |
parent | 08fb59d8a47d5e1f9de08659603a47f117fe60d5 (diff) | |
download | linux-956bf3531fba53c0501eda4fbc67950b0f7b913f.tar.xz |
kvm: x86: Skip shadow page resync on CR3 switch when indicated by guest
When the guest indicates that the TLB doesn't need to be flushed in a
CR3 switch, we can also skip resyncing the shadow page tables since an
out-of-sync shadow page table is equivalent to an out-of-sync TLB.
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 33 |
1 file changed, 30 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 0f6965ce016a..9446a36a4ab7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4098,9 +4098,19 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, */ kvm_make_request(KVM_REQ_LOAD_CR3, vcpu); - kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); - if (!skip_tlb_flush) + if (!skip_tlb_flush) { + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); kvm_x86_ops->tlb_flush(vcpu, true); + } + + /* + * The last MMIO access's GVA and GPA are cached in the + * VCPU. When switching to a new CR3, that GVA->GPA + * mapping may no longer be valid. So clear any cached + * MMIO info even when we don't need to sync the shadow + * page tables. + */ + vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); __clear_sp_write_flooding_count( page_header(mmu->root_hpa)); @@ -5217,6 +5227,21 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) struct kvm_mmu *mmu = &vcpu->arch.mmu; mmu->invlpg(vcpu, gva, mmu->root_hpa); + + /* + * INVLPG is required to invalidate any global mappings for the VA, + * irrespective of PCID. Since it would take us roughly similar amount + * of work to determine whether the prev_root mapping of the VA is + * marked global, or to just sync it blindly, so we might as well just + * always sync it. + * + * Mappings not reachable via the current cr3 or the prev_root.cr3 will + * be synced when switching to that cr3, so nothing needs to be done + * here for them. + */ + if (VALID_PAGE(mmu->prev_root.hpa)) + mmu->invlpg(vcpu, gva, mmu->prev_root.hpa); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); ++vcpu->stat.invlpg; } @@ -5232,8 +5257,10 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) } if (VALID_PAGE(mmu->prev_root.hpa) && - pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3)) + pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3)) { + mmu->invlpg(vcpu, gva, mmu->prev_root.hpa); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } ++vcpu->stat.invlpg; |