author    Junaid Shahid <junaids@google.com>  2018-06-28 00:59:15 +0300
committer Paolo Bonzini <pbonzini@redhat.com>  2018-08-06 18:58:58 +0300
commit    ade61e2824443a208bb3aaafd8b345ce878298cd (patch)
tree      aae386f7ee3a83188f0d7e5b971763970019d490 /arch/x86
parent    eb4b248e152d3ecf189b9d32c04961360dbd938a (diff)
download  linux-ade61e2824443a208bb3aaafd8b345ce878298cd.tar.xz
kvm: x86: Skip TLB flush on fast CR3 switch when indicated by guest
When PCIDs are enabled, the MSb of the source operand for a MOV-to-CR3
instruction indicates that the TLB doesn't need to be flushed. This change
enables this optimization for MOV-to-CR3s in the guest that have been
intercepted by KVM for shadow paging and are handled within the fast CR3
switch path.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
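For context, the "MSb of the source operand" is bit 63 of the value written to
CR3 (the kernel names it CR3_PCID_INVD); when CR4.PCIDE is set it is only a
"do not flush" hint and is never part of the page-table address. A minimal,
standalone sketch of how an intercepted CR3 write might honor that hint
(simplified illustration with made-up struct and handler names, not the
kernel's actual kvm_set_cr3 code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit 63 of the MOV-to-CR3 source operand: "do not flush TLB" hint,
     * meaningful only when CR4.PCIDE = 1. */
    #define CR3_NOFLUSH_BIT (1ULL << 63)

    struct vcpu_state {
            bool pcid_enabled;      /* mirrors guest CR4.PCIDE */
            uint64_t cr3;           /* currently loaded guest CR3 */
    };

    /* Hypothetical handler for an intercepted MOV to CR3. Returns true if
     * the hypervisor still needs to request a TLB flush for this vCPU. */
    bool handle_cr3_write(struct vcpu_state *vcpu, uint64_t new_cr3)
    {
            bool skip_tlb_flush = false;

            if (vcpu->pcid_enabled) {
                    /* The hint bit must be stripped before the value is
                     * compared against or stored as the active CR3. */
                    skip_tlb_flush = new_cr3 & CR3_NOFLUSH_BIT;
                    new_cr3 &= ~CR3_NOFLUSH_BIT;
            }

            vcpu->cr3 = new_cr3;

            /* Flush only when the guest did not ask to keep TLB entries. */
            return !skip_tlb_flush;
    }

The patch below applies the same idea inside kvm_set_cr3() and threads the
resulting skip_tlb_flush flag down through kvm_mmu_new_cr3() into the fast
CR3 switch path.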
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/mmu.c              | 28
-rw-r--r--  arch/x86/kvm/vmx.c              | 12
-rw-r--r--  arch/x86/kvm/x86.c              | 11
4 files changed, 36 insertions, 17 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1c253f15f9cd..8b4aa5e7ff92 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1318,7 +1318,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2af15db12ccd..2b7cfc8e41bb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4037,7 +4037,8 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
}
static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
- union kvm_mmu_page_role new_role)
+ union kvm_mmu_page_role new_role,
+ bool skip_tlb_flush)
{
struct kvm_mmu *mmu = &vcpu->arch.mmu;
@@ -4070,7 +4071,9 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
- kvm_x86_ops->tlb_flush(vcpu, true);
+ if (!skip_tlb_flush)
+ kvm_x86_ops->tlb_flush(vcpu, true);
+
__clear_sp_write_flooding_count(
page_header(mmu->root_hpa));
@@ -4082,15 +4085,17 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
}
static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
- union kvm_mmu_page_role new_role)
+ union kvm_mmu_page_role new_role,
+ bool skip_tlb_flush)
{
- if (!fast_cr3_switch(vcpu, new_cr3, new_role))
+ if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
kvm_mmu_free_roots(vcpu, false);
}
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
{
- __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu));
+ __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
+ skip_tlb_flush);
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
@@ -4733,7 +4738,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
union kvm_mmu_page_role root_page_role =
kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
- __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role);
+ __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
context->shadow_root_level = PT64_ROOT_4LEVEL;
context->nx = true;
@@ -5196,11 +5201,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
+ if (VALID_PAGE(mmu->prev_root.hpa) &&
+ pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3))
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
++vcpu->stat.invlpg;
/*
- * Mappings not reachable via the current cr3 will be synced when
- * switching to that cr3, so nothing needs to be done here for them.
+ * Mappings not reachable via the current cr3 or the prev_root.cr3 will
+ * be synced when switching to that cr3, so nothing needs to be done
+ * here for them.
*/
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 80be024cb979..6151418cec32 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8819,10 +8819,14 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
+ if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_root.cr3)
+ == operand.pcid)
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
/*
- * If the current cr3 does not use the given PCID, then nothing
- * needs to be done here because a resync will happen anyway
- * before switching to any other CR3.
+ * If neither the current cr3 nor the prev_root.cr3 use the
+ * given PCID, then nothing needs to be done here because a
+ * resync will happen anyway before switching to any other CR3.
*/
return kvm_skip_emulated_instruction(vcpu);
@@ -11434,7 +11438,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
}
if (!nested_ept)
- kvm_mmu_new_cr3(vcpu, cr3);
+ kvm_mmu_new_cr3(vcpu, cr3, false);
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7748037b17fd..493afbf12e78 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -847,16 +847,21 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
+ bool skip_tlb_flush = false;
#ifdef CONFIG_X86_64
bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
- if (pcid_enabled)
+ if (pcid_enabled) {
+ skip_tlb_flush = cr3 & CR3_PCID_INVD;
cr3 &= ~CR3_PCID_INVD;
+ }
#endif
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
+ if (!skip_tlb_flush)
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
@@ -867,7 +872,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
- kvm_mmu_new_cr3(vcpu, cr3);
+ kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);