#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
#define PERF_PMU_CAP_AUX_PAUSE 0x0200
#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
+#define PERF_PMU_CAP_MEDIATED_VPMU 0x0800
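
For context, a PMU driver opts in by advertising this capability bit at
registration time. A minimal sketch, assuming a hypothetical driver
(my_pmu and its callbacks are illustrative, not part of this patch):

  static struct pmu my_pmu = {
  	.task_ctx_nr	= perf_hw_context,
  	.event_init	= my_pmu_event_init,	/* hypothetical callbacks */
  	.add		= my_pmu_add,
  	.del		= my_pmu_del,
  	/* Opt in to mediated vPMU handover. */
  	.capabilities	= PERF_PMU_CAP_MEDIATED_VPMU,
  };
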
/**
* pmu::scope
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+int perf_create_mediated_pmu(void);
+void perf_release_mediated_pmu(void);
+#endif
+
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
call_rcu(&event->rcu_head, free_event_rcu);
}
+static void mediated_pmu_unaccount_event(struct perf_event *event);
+
DEFINE_FREE(__free_event, struct perf_event *, if (_T) __free_event(_T))
/* vs perf_event_alloc() success */
irq_work_sync(&event->pending_disable_irq);
unaccount_event(event);
+ mediated_pmu_unaccount_event(event);
if (event->rb) {
/*
}
EXPORT_SYMBOL_GPL(perf_event_pause);
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+static atomic_t nr_include_guest_events __read_mostly;
+
+static atomic_t nr_mediated_pmu_vms __read_mostly;
+static DEFINE_MUTEX(perf_mediated_pmu_mutex);
+
+/* !exclude_guest event of PMU with PERF_PMU_CAP_MEDIATED_VPMU */
+static inline bool is_include_guest_event(struct perf_event *event)
+{
+ if ((event->pmu->capabilities & PERF_PMU_CAP_MEDIATED_VPMU) &&
+ !event->attr.exclude_guest)
+ return true;
+
+ return false;
+}
+
+static int mediated_pmu_account_event(struct perf_event *event)
+{
+ if (!is_include_guest_event(event))
+ return 0;
+
+ guard(mutex)(&perf_mediated_pmu_mutex);
+
+ /* Reject: an existing mediated-PMU VM owns the guest-mode counters. */
+ if (atomic_read(&nr_mediated_pmu_vms))
+ return -EOPNOTSUPP;
+
+ atomic_inc(&nr_include_guest_events);
+ return 0;
+}
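
The user-visible effect of the -EOPNOTSUPP above: while a mediated-PMU VM
exists, opening a !exclude_guest event on a capable PMU fails. A minimal
userspace sketch, assuming the host's PERF_TYPE_HARDWARE PMU advertises
PERF_PMU_CAP_MEDIATED_VPMU:

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <string.h>
  #include <stdio.h>

  int main(void)
  {
  	struct perf_event_attr attr;

  	memset(&attr, 0, sizeof(attr));
  	attr.size = sizeof(attr);
  	attr.type = PERF_TYPE_HARDWARE;
  	attr.config = PERF_COUNT_HW_CPU_CYCLES;
  	attr.exclude_guest = 0;	/* an "include guest" event */

  	/* pid 0 = self, cpu -1 = any, no group, no flags */
  	if (syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0)
  		perror("perf_event_open");	/* EOPNOTSUPP while a mediated-PMU VM exists */
  	return 0;
  }
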
+
+static void mediated_pmu_unaccount_event(struct perf_event *event)
+{
+ if (!is_include_guest_event(event))
+ return;
+
+ atomic_dec(&nr_include_guest_events);
+}
+
+/*
+ * Currently invoked at VM creation to
+ * - Check whether there are existing !exclude_guest events of PMUs with
+ *   PERF_PMU_CAP_MEDIATED_VPMU
+ * - Bump nr_mediated_pmu_vms to prevent !exclude_guest event creation on
+ *   PMUs with PERF_PMU_CAP_MEDIATED_VPMU
+ *
+ * Holding perf_mediated_pmu_mutex makes the check and the increment atomic
+ * with respect to mediated_pmu_account_event().
+ *
+ * PMUs without PERF_PMU_CAP_MEDIATED_VPMU are unaffected; perf still owns
+ * all of their resources.
+ */
+int perf_create_mediated_pmu(void)
+{
+ guard(mutex)(&perf_mediated_pmu_mutex);
+ /* Another mediated-PMU VM already exists; just take a reference. */
+ if (atomic_inc_not_zero(&nr_mediated_pmu_vms))
+ return 0;
+
+ if (atomic_read(&nr_include_guest_events))
+ return -EBUSY;
+
+ atomic_inc(&nr_mediated_pmu_vms);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(perf_create_mediated_pmu);
+
+void perf_release_mediated_pmu(void)
+{
+ if (WARN_ON_ONCE(!atomic_read(&nr_mediated_pmu_vms)))
+ return;
+
+ atomic_dec(&nr_mediated_pmu_vms);
+}
+EXPORT_SYMBOL_GPL(perf_release_mediated_pmu);
+#else
+static int mediated_pmu_account_event(struct perf_event *event) { return 0; }
+static void mediated_pmu_unaccount_event(struct perf_event *event) {}
+#endif
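
A sketch of the intended call pairing from a hypervisor's VM lifecycle;
my_vm and both hooks are hypothetical stand-ins for the KVM wiring done
elsewhere in the series:

  struct my_vm { bool mediated_pmu; };	/* hypothetical */

  static int my_vm_enable_mediated_pmu(struct my_vm *vm)
  {
  	int err;

  	/* Fails with -EBUSY if !exclude_guest events already exist. */
  	err = perf_create_mediated_pmu();
  	if (err)
  		return err;

  	vm->mediated_pmu = true;
  	return 0;
  }

  static void my_vm_free(struct my_vm *vm)
  {
  	/* Drop this VM's reference on nr_mediated_pmu_vms. */
  	if (vm->mediated_pmu)
  		perf_release_mediated_pmu();
  }
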
+
/*
* Holding the top-level event's child_mutex means that any
* descendant process that has inherited this event will block
if (err)
return ERR_PTR(err);
+ /* symmetric to mediated_pmu_unaccount_event() in _free_event() */
+ err = mediated_pmu_account_event(event);
+ if (err)
+ return ERR_PTR(err);
+
/* symmetric to unaccount_event() in _free_event() */
account_event(event);