author    Sean Christopherson <seanjc@google.com>  2022-04-20 03:27:47 +0300
committer Paolo Bonzini <pbonzini@redhat.com>      2022-04-29 19:49:16 +0300
commit    8b9e74bfbf8c7020498a9ea600bd4c0f1915134d
tree      7b4320ef4ead6e6a2b316f796b00cc1850f03f26 /arch/x86/kvm/mmu
parent    65936229d35883a1637730e628c3c84d3600025d
KVM: x86/mmu: Use enable_mmio_caching to track if MMIO caching is enabled
Clear enable_mmio_caching if hardware can't support MMIO caching and use
the dedicated flag to detect if MMIO caching is enabled instead of
assuming shadow_mmio_value==0 means MMIO caching is disabled.  TDX will
use a zero value even when caching is enabled, and is_mmio_spte() isn't
so hot that it needs to avoid an extra memory access, i.e. there's no
reason to be super clever.  And the clever approach may not even be more
performant, e.g. gcc-11 lands the extra check on a non-zero value inline,
but puts the enable_mmio_caching check out-of-line, i.e. avoids the few
extra uops for non-MMIO SPTEs.

Cc: Isaku Yamahata <isaku.yamahata@intel.com>
Cc: Kai Huang <kai.huang@intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220420002747.3287931-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
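[Editor's note: for illustration only, a minimal stand-alone C sketch of the pattern the
patch adopts: a dedicated enable flag that the setter clears when the value can't be
supported, so consumers test the flag instead of overloading value == 0 as "disabled".
Every name below is a hypothetical stand-in (enable_feature for enable_mmio_caching,
set_feature_value() for kvm_mmu_set_mmio_spte_mask()); this is not KVM code.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for enable_mmio_caching, shadow_mmio_value, etc. */
bool enable_feature = true;
uint64_t feature_value;
uint64_t feature_mask;

void set_feature_value(uint64_t value, uint64_t mask)
{
	/*
	 * As in the patch, a zero value still disables the feature, but that
	 * assumption now lives in exactly one place: the setter.  A future
	 * caller (e.g. TDX) could keep the flag set with value == 0.
	 */
	if (!value)
		enable_feature = false;

	feature_value = value;
	feature_mask = mask;
}

bool matches_feature(uint64_t x)
{
	/* Consumers key off the dedicated flag, not the sentinel value. */
	return (x & feature_mask) == feature_value && enable_feature;
}
]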
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r--  arch/x86/kvm/mmu/mmu.c   2
-rw-r--r--  arch/x86/kvm/mmu/spte.c  5
-rw-r--r--  arch/x86/kvm/mmu/spte.h  4
3 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 77785587332e..7b088417d978 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3036,7 +3036,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
	 * and only if L1's MAXPHYADDR is inaccurate with respect to
	 * the hardware's).
	 */
-	if (unlikely(!shadow_mmio_value) ||
+	if (unlikely(!enable_mmio_caching) ||
	    unlikely(fault->gfn > kvm_mmu_max_gfn())) {
		*ret_val = RET_PF_EMULATE;
		return true;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index aab78574e03d..3d611f07eee8 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -19,7 +19,7 @@
#include <asm/memtype.h>
#include <asm/vmx.h>

-static bool __read_mostly enable_mmio_caching = true;
+bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
@@ -351,6 +351,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;

+	if (!mmio_value)
+		enable_mmio_caching = false;
+
	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index e4abeb5df1b1..43eceb821b31 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -5,6 +5,8 @@
#include "mmu_internal.h"

+extern bool __read_mostly enable_mmio_caching;
+
/*
 * A MMU present SPTE is backed by actual memory and may or may not be present
 * in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it
@@ -204,7 +206,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
static inline bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
-	       likely(shadow_mmio_value);
+	       likely(enable_mmio_caching);
}

static inline bool is_shadow_present_pte(u64 pte)
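
[Editor's note: to make the is_mmio_spte() change concrete, a stand-alone C demo of why
gating on the value itself misclassifies every MMIO SPTE once a zero MMIO value is legal
(the TDX case the changelog mentions).  The mask/value constants are invented, and the
demo assumes a future TDX-like configuration in which the flag stays set even though the
value is zero; this patch itself still clears the flag for a zero value.  Not KVM code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static const uint64_t mmio_mask  = 0x7; /* invented for the demo */
static const uint64_t mmio_value = 0x0; /* TDX-style zero value  */
static const bool     caching_on = true;

/* Old form: 'likely(shadow_mmio_value)' assumed value == 0 means disabled. */
static bool is_mmio_old(uint64_t spte)
{
	return (spte & mmio_mask) == mmio_value && mmio_value;
}

/* New form: the dedicated flag decides. */
static bool is_mmio_new(uint64_t spte)
{
	return (spte & mmio_mask) == mmio_value && caching_on;
}

int main(void)
{
	uint64_t mmio_spte = 0x1000; /* low mask bits clear: matches value 0 */

	assert(!is_mmio_old(mmio_spte)); /* old check is always false when value == 0 */
	assert(is_mmio_new(mmio_spte));  /* new check still detects MMIO SPTEs */
	return 0;
}
]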