summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arch/x86/events/core.c32
-rw-r--r--arch/x86/include/asm/perf_event.h5
2 files changed, 37 insertions, 0 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 0c38a31d5fc7..3ad5c658e286 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -56,6 +56,8 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.pmu = &pmu,
};
+static DEFINE_PER_CPU(bool, guest_lvtpc_loaded);
+
DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);
@@ -1760,6 +1762,25 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+void perf_load_guest_lvtpc(u32 guest_lvtpc)
+{
+ u32 masked = guest_lvtpc & APIC_LVT_MASKED;
+
+ apic_write(APIC_LVTPC,
+ APIC_DM_FIXED | PERF_GUEST_MEDIATED_PMI_VECTOR | masked);
+ this_cpu_write(guest_lvtpc_loaded, true);
+}
+EXPORT_SYMBOL_FOR_MODULES(perf_load_guest_lvtpc, "kvm");
+
+void perf_put_guest_lvtpc(void)
+{
+ this_cpu_write(guest_lvtpc_loaded, false);
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+}
+EXPORT_SYMBOL_FOR_MODULES(perf_put_guest_lvtpc, "kvm");
+#endif /* CONFIG_PERF_GUEST_MEDIATED_PMU */
+
static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
@@ -1768,6 +1789,17 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
int ret;
/*
+ * Ignore all NMIs when the CPU's LVTPC is configured to route PMIs to
+	 * PERF_GUEST_MEDIATED_PMI_VECTOR, i.e. when an NMI can't possibly be
+	 * due to a PMI.  Attempting to handle a PMI while the guest's context is
+ * loaded will generate false positives and clobber guest state. Note,
+ * the LVTPC is switched to/from the dedicated mediated PMI IRQ vector
+ * while host events are quiesced.
+ */
+ if (this_cpu_read(guest_lvtpc_loaded))
+ return NMI_DONE;
+
+ /*
* All PMUs/events that share this PMI handler should make sure to
* increment active_events for their events.
*/
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 7276ba70c88a..fb7b261357bf 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -759,6 +759,11 @@ static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+extern void perf_load_guest_lvtpc(u32 guest_lvtpc);
+extern void perf_put_guest_lvtpc(void);
+#endif
+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);