.pmu = &pmu,
};
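+/* True while this CPU's LVTPC routes PMIs to the mediated guest PMI vector. */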
+static DEFINE_PER_CPU(bool, guest_lvtpc_loaded);
+
DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
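+/*
+ * Redirect this CPU's PMIs to the dedicated mediated guest PMI vector while
+ * the guest's PMU context is loaded, preserving only the guest's mask bit.
+ */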
+void perf_load_guest_lvtpc(u32 guest_lvtpc)
+{
+ u32 masked = guest_lvtpc & APIC_LVT_MASKED;
+
+ apic_write(APIC_LVTPC,
+ APIC_DM_FIXED | PERF_GUEST_MEDIATED_PMI_VECTOR | masked);
+ this_cpu_write(guest_lvtpc_loaded, true);
+}
+EXPORT_SYMBOL_GPL_FOR_MODULES(perf_load_guest_lvtpc, "kvm");
+
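+/* Restore NMI delivery for PMIs once the guest's PMU context is put. */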
+void perf_put_guest_lvtpc(void)
+{
+ this_cpu_write(guest_lvtpc_loaded, false);
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+}
+EXPORT_SYMBOL_GPL_FOR_MODULES(perf_put_guest_lvtpc, "kvm");
+#endif /* CONFIG_PERF_GUEST_MEDIATED_PMU */
+
static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
u64 finish_clock;
int ret;
+ /*
+ * Ignore all NMIs when the CPU's LVTPC is configured to route PMIs to
+ * PERF_GUEST_MEDIATED_PMI_VECTOR, i.e. when an NMI can't be due
+ * to a PMI. Attempting to handle a PMI while the guest's context is
+ * loaded will generate false positives and clobber guest state. Note,
+ * the LVTPC is switched to/from the dedicated mediated PMI IRQ vector
+ * while host events are quiesced.
+ */
+ if (this_cpu_read(guest_lvtpc_loaded))
+ return NMI_DONE;
+
/*
* All PMUs/events that share this PMI handler should make sure to
* increment active_events for their events.
static inline void perf_check_microcode(void) { }
#endif
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+extern void perf_load_guest_lvtpc(u32 guest_lvtpc);
+extern void perf_put_guest_lvtpc(void);
+#endif
+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);