author    Sean Christopherson <seanjc@google.com>  2026-02-14 04:26:53 +0300
committer Sean Christopherson <seanjc@google.com>  2026-03-04 19:52:45 +0300
commit    32d76cdfa1222c88262da5b12e0b2bba444c96fa (patch)
tree      413c0a05a72060cb48a69a0ad4348964610b9740
parent    920da4f75519a3fa3fe2fc25458445b561653610 (diff)
download  linux-32d76cdfa1222c88262da5b12e0b2bba444c96fa.tar.xz
KVM: SVM: Move core EFER.SVME enablement to kernel
Move the innermost EFER.SVME logic out of KVM and into core x86 to land the
SVM support alongside VMX support. This will allow providing a more unified
API from the kernel to KVM, and will allow moving the bulk of the emergency
disabling insanity out of KVM without having a weird split between kernel
and KVM for SVM vs. VMX.

No functional change intended.

Tested-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Sagi Shahar <sagis@google.com>
Link: https://patch.msgid.link/20260214012702.2368778-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
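[Editor's note: with EFER.SVME ownership moved into arch/x86/virt/hw.c, KVM's
per-CPU enable path reduces to a call into the new kernel helper plus KVM's
own bookkeeping. A condensed sketch of the resulting flow follows; the helper
name and error codes are exactly those introduced by this patch, and the
elided body stands in for KVM's existing per-CPU setup shown in the diff.]

	static int svm_enable_virtualization_cpu(void)
	{
		int r;

		/*
		 * The kernel now owns the EFER.SVME flip: -EOPNOTSUPP if
		 * the CPU lacks SVM, -EBUSY if SVME was already set.
		 */
		r = x86_svm_enable_virtualization_cpu();
		if (r)
			return r;

		/* ... KVM-only per-CPU setup (ASIDs, MSR_VM_HSAVE_PA) ... */
		return 0;
	}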
-rw-r--r--  arch/x86/include/asm/virt.h |  6
-rw-r--r--  arch/x86/kvm/svm/svm.c      | 34
-rw-r--r--  arch/x86/virt/hw.c          | 53
3 files changed, 66 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/virt.h b/arch/x86/include/asm/virt.h
index cca0210a5c16..9a0753eaa20c 100644
--- a/arch/x86/include/asm/virt.h
+++ b/arch/x86/include/asm/virt.h
@@ -15,6 +15,12 @@ int x86_vmx_disable_virtualization_cpu(void);
void x86_vmx_emergency_disable_virtualization_cpu(void);
#endif
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void);
+int x86_svm_disable_virtualization_cpu(void);
+void x86_svm_emergency_disable_virtualization_cpu(void);
+#endif
+
#else
static __always_inline void x86_virt_init(void) {}
#endif
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0ae66c770ebc..fc08450cb4b7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -478,27 +478,9 @@ static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm
return &sd->save_area->host_sev_es_save;
}
-static inline void kvm_cpu_svm_disable(void)
-{
- uint64_t efer;
-
- wrmsrq(MSR_VM_HSAVE_PA, 0);
- rdmsrq(MSR_EFER, efer);
- if (efer & EFER_SVME) {
- /*
- * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
- * NMI aren't blocked.
- */
- stgi();
- wrmsrq(MSR_EFER, efer & ~EFER_SVME);
- }
-}
-
static void svm_emergency_disable_virtualization_cpu(void)
{
- virt_rebooting = true;
-
- kvm_cpu_svm_disable();
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
}
static void svm_disable_virtualization_cpu(void)
@@ -507,7 +489,8 @@ static void svm_disable_virtualization_cpu(void)
if (tsc_scaling)
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
- kvm_cpu_svm_disable();
+ x86_svm_disable_virtualization_cpu();
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
amd_pmu_disable_virt();
}
@@ -516,12 +499,12 @@ static int svm_enable_virtualization_cpu(void)
{
struct svm_cpu_data *sd;
- uint64_t efer;
int me = raw_smp_processor_id();
+ int r;
- rdmsrq(MSR_EFER, efer);
- if (efer & EFER_SVME)
- return -EBUSY;
+ r = x86_svm_enable_virtualization_cpu();
+ if (r)
+ return r;
sd = per_cpu_ptr(&svm_data, me);
sd->asid_generation = 1;
@@ -529,8 +512,6 @@ static int svm_enable_virtualization_cpu(void)
sd->next_asid = sd->max_asid + 1;
sd->min_asid = max_sev_asid + 1;
- wrmsrq(MSR_EFER, efer | EFER_SVME);
-
wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
@@ -541,7 +522,6 @@ static int svm_enable_virtualization_cpu(void)
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
}
-
/*
* Get OSVW bits.
*
diff --git a/arch/x86/virt/hw.c b/arch/x86/virt/hw.c
index dc426c2bc24a..014e9dfab805 100644
--- a/arch/x86/virt/hw.c
+++ b/arch/x86/virt/hw.c
@@ -163,6 +163,59 @@ static __init int x86_vmx_init(void)
static __init int x86_vmx_init(void) { return -EOPNOTSUPP; }
#endif
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void)
+{
+ u64 efer;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SVM))
+ return -EOPNOTSUPP;
+
+ rdmsrq(MSR_EFER, efer);
+ if (efer & EFER_SVME)
+ return -EBUSY;
+
+ wrmsrq(MSR_EFER, efer | EFER_SVME);
+ return 0;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_enable_virtualization_cpu);
+
+int x86_svm_disable_virtualization_cpu(void)
+{
+ int r = -EIO;
+ u64 efer;
+
+ /*
+ * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+ * NMI aren't blocked.
+ */
+ asm goto("1: stgi\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ ::: "memory" : fault);
+ r = 0;
+
+fault:
+ rdmsrq(MSR_EFER, efer);
+ wrmsrq(MSR_EFER, efer & ~EFER_SVME);
+ return r;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_disable_virtualization_cpu);
+
+void x86_svm_emergency_disable_virtualization_cpu(void)
+{
+ u64 efer;
+
+ virt_rebooting = true;
+
+ rdmsrq(MSR_EFER, efer);
+ if (!(efer & EFER_SVME))
+ return;
+
+ x86_svm_disable_virtualization_cpu();
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_emergency_disable_virtualization_cpu);
+#endif
+
void __init x86_virt_init(void)
{
x86_vmx_init();
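
[Editor's note: the interesting mechanism above is the exception-table
fixup around STGI in x86_svm_disable_virtualization_cpu(). STGI raises #UD
when EFER.SVME is clear, which can happen if another path (e.g. an
emergency reboot) already disabled SVM; the _ASM_EXTABLE entry redirects
that fault to a local C label instead of oopsing. A minimal kernel-context
sketch of the same pattern, lifted from the hunk above; try_stgi() is a
hypothetical helper name used only for illustration:]

	#include <asm/asm.h>	/* _ASM_EXTABLE() */

	static int try_stgi(void)
	{
		/*
		 * "1:" labels the STGI so _ASM_EXTABLE can pair it with
		 * the C goto label; on #UD the exception handler resumes
		 * execution at "fault" rather than killing the CPU.
		 */
		asm goto("1: stgi\n\t"
			 _ASM_EXTABLE(1b, %l[fault])
			 ::: "memory" : fault);
		return 0;
	fault:
		return -EIO;	/* STGI faulted: SVM was not enabled */
	}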