Diffstat (limited to 'arch/x86/kvm/svm')

 arch/x86/kvm/svm/pmu.c |  1 +
 arch/x86/kvm/svm/sev.c | 10 ++++++++++
 arch/x86/kvm/svm/svm.c | 25 ++++++-------------------
 arch/x86/kvm/svm/svm.h |  2 ++
 4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 288f7f2a46f2..aa4379e46e96 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -113,6 +113,7 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
 	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
 		return pmu->version > 1;
 	default:
 		if (msr > MSR_F15H_PERF_CTR5 &&
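
The hunk above simply extends the PerfMonV2 allow-list with the STATUS_SET MSR. As a rough user-space model of that gating (not kernel code: the GLOBAL_STATUS_SET index and the trimmed default arm are assumptions here), the check reduces to:

/*
 * Illustrative sketch of the version gate in amd_is_valid_msr(): the global
 * control MSRs, including the newly accepted GLOBAL_STATUS_SET, are only
 * valid when the guest PMU is v2 or later.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS	0xc0000300
#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL		0xc0000301
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR	0xc0000302
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET	0xc0000303	/* assumed index */

static bool amd_pmu_msr_is_valid(unsigned int pmu_version, uint32_t msr)
{
	switch (msr) {
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
		/* Global control only exists on PerfMonV2-capable PMUs. */
		return pmu_version > 1;
	default:
		/* Legacy counter MSR handling elided in this sketch. */
		return false;
	}
}

int main(void)
{
	printf("v1 accepts STATUS_SET: %d\n",
	       amd_pmu_msr_is_valid(1, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET));
	printf("v2 accepts STATUS_SET: %d\n",
	       amd_pmu_msr_is_valid(2, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET));
	return 0;
}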
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0635bd71c10e..7b1e9424af15 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4618,6 +4618,16 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
 		hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
 		hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
 	}
+
+	/*
+	 * TSC_AUX is always virtualized for SEV-ES guests when the feature is
+	 * available, i.e. TSC_AUX is loaded on #VMEXIT from the host save area.
+	 * Set the save area to the current hardware value, i.e. the current
+	 * user return value, so that the correct value is restored on #VMEXIT.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_V_TSC_AUX) &&
+	    !WARN_ON_ONCE(tsc_aux_uret_slot < 0))
+		hostsa->tsc_aux = kvm_get_user_return_msr(tsc_aux_uret_slot);
 }
 
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
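
Taken together with the svm.c changes below, this new block makes the host save area track whatever value the user-return-MSR machinery last wrote to hardware. A minimal user-space sketch of that flow (not kernel code: the structure layout, slot handling, and helper are simplified stand-ins for kvm_get_user_return_msr() and struct sev_es_save_area):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct user_return_msr {
	uint64_t curr;			/* value currently in hardware */
};

/* One slot per tracked MSR; slot -1 would mean "not allocated". */
static struct user_return_msr user_return_msrs[4];
static int tsc_aux_uret_slot = 0;	/* assume slot 0 was allocated */

static uint64_t get_user_return_msr(int slot)
{
	return user_return_msrs[slot].curr;
}

struct sev_es_save_area {		/* heavily trimmed */
	uint64_t tsc_aux;
};

static void prepare_switch_to_guest(struct sev_es_save_area *hostsa)
{
	/* Stand-in for the WARN_ON_ONCE() sanity check in the patch. */
	assert(tsc_aux_uret_slot >= 0);

	/* Mirror the current hardware value into the host save area. */
	hostsa->tsc_aux = get_user_return_msr(tsc_aux_uret_slot);
}

int main(void)
{
	struct sev_es_save_area hostsa = { 0 };

	user_return_msrs[tsc_aux_uret_slot].curr = 0x2a;  /* host TSC_AUX */
	prepare_switch_to_guest(&hostsa);
	printf("host save area tsc_aux = 0x%llx\n",
	       (unsigned long long)hostsa.tsc_aux);
	return 0;
}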
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c813d6cce69f..83ca0b05abc1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -195,7 +195,7 @@ static DEFINE_MUTEX(vmcb_dump_mutex);
  * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
  * defer the restoration of TSC_AUX until the CPU returns to userspace.
  */
-static int tsc_aux_uret_slot __read_mostly = -1;
+int tsc_aux_uret_slot __ro_after_init = -1;
 
 static int get_npt_level(void)
 {
@@ -577,18 +577,6 @@ static int svm_enable_virtualization_cpu(void)
 
 	amd_pmu_enable_virt();
 
-	/*
-	 * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
-	 * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
-	 * Since Linux does not change the value of TSC_AUX once set, prime the
-	 * TSC_AUX field now to avoid a RDMSR on every vCPU run.
-	 */
-	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
-		u32 __maybe_unused msr_hi;
-
-		rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
-	}
-
 	return 0;
 }
 
@@ -1423,10 +1411,10 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 
 	/*
-	 * TSC_AUX is always virtualized for SEV-ES guests when the feature is
-	 * available. The user return MSR support is not required in this case
-	 * because TSC_AUX is restored on #VMEXIT from the host save area
-	 * (which has been initialized in svm_enable_virtualization_cpu()).
+	 * TSC_AUX is always virtualized (context switched by hardware) for
+	 * SEV-ES guests when the feature is available. For non-SEV-ES guests,
+	 * context switch TSC_AUX via the user return MSR infrastructure (not
+	 * all CPUs support TSC_AUX virtualization).
 	 */
 	if (likely(tsc_aux_uret_slot >= 0) &&
 	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
@@ -3021,8 +3009,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		 * TSC_AUX is always virtualized for SEV-ES guests when the
 		 * feature is available. The user return MSR support is not
 		 * required in this case because TSC_AUX is restored on #VMEXIT
-		 * from the host save area (which has been initialized in
-		 * svm_enable_virtualization_cpu()).
+		 * from the host save area.
 		 */
 		if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
 			break;
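
The net effect in svm.c is a single predicate: the user-return slot handles TSC_AUX unless hardware virtualizes it and the vCPU belongs to an SEV-ES guest, in which case the host save area primed in sev_es_prepare_switch_to_guest() takes over. A stand-alone restatement of that condition (illustrative only; names are ad hoc):

#include <stdbool.h>
#include <stdio.h>

static bool use_uret_for_tsc_aux(int uret_slot, bool cpu_has_v_tsc_aux,
				 bool is_sev_es_guest)
{
	/* No slot means RDTSCP/RDPID are unsupported: nothing to switch. */
	if (uret_slot < 0)
		return false;

	/* Hardware only takes over for SEV-ES guests on V_TSC_AUX CPUs. */
	return !cpu_has_v_tsc_aux || !is_sev_es_guest;
}

int main(void)
{
	/* All four combinations with a valid slot. */
	for (int v = 0; v <= 1; v++)
		for (int es = 0; es <= 1; es++)
			printf("V_TSC_AUX=%d SEV-ES=%d -> uret=%d\n", v, es,
			       use_uret_for_tsc_aux(0, v, es));
	return 0;
}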
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 58b9d168e0c8..04371aa8c8f2 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -52,6 +52,8 @@ extern bool x2avic_enabled;
 extern bool vnmi;
 extern int lbrv;
 
+extern int tsc_aux_uret_slot __ro_after_init;
+
 /*
  * Clean bits in VMCB.
  * VMCB_ALL_CLEAN_MASK might also need to
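
Finally, the svm.h hunk exposes the slot across translation units so sev.c can query the user-return cache directly. A condensed single-file sketch of that extern split (the __ro_after_init annotation, which makes the variable read-only once boot-time init completes, has no portable user-space equivalent and is dropped here):

#include <stdio.h>

/* svm.h: declaration visible to every includer. */
extern int tsc_aux_uret_slot;

/* svm.c: the one definition, assigned once during init. */
int tsc_aux_uret_slot = -1;

/* sev.c: a consumer that only ever reads the slot. */
static void consumer(void)
{
	printf("tsc_aux_uret_slot = %d\n", tsc_aux_uret_slot);
}

int main(void)
{
	tsc_aux_uret_slot = 0;	/* "init-time" assignment */
	consumer();
	return 0;
}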