git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
perf: Add PERF_EV_CAP_READ_SCOPE
author Kan Liang <kan.liang@linux.intel.com>
Fri, 2 Aug 2024 15:16:38 +0000 (08:16 -0700)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 10 Sep 2024 09:44:13 +0000 (11:44 +0200)
Usually, an event can be read from any CPU of the scope. It doesn't need
to be read from the advertised CPU.

Add a new event cap, PERF_EV_CAP_READ_SCOPE. An event of a PMU with
scope can be read from any active CPU in the scope.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240802151643.1691631-3-kan.liang@linux.intel.com
include/linux/perf_event.h
kernel/events/core.c

index a3cbcd727366b01fd57a91effcfd9b34f1204929..794f6605787800e2eb99e8bef71a1d45baa7e147 100644 (file)
@@ -636,10 +636,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
  * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
  * cannot be a group leader. If an event with this flag is detached from the
  * group it is scheduled out and moved into an unrecoverable ERROR state.
+ * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
+ * PMU scope where it is active.
  */
 #define PERF_EV_CAP_SOFTWARE           BIT(0)
 #define PERF_EV_CAP_READ_ACTIVE_PKG    BIT(1)
 #define PERF_EV_CAP_SIBLING            BIT(2)
+#define PERF_EV_CAP_READ_SCOPE         BIT(3)
 
 #define SWEVENT_HLIST_BITS             8
 #define SWEVENT_HLIST_SIZE             (1 << SWEVENT_HLIST_BITS)
index 5ff973513fac9f2fd8676450a4115f89e813446e..2766090de84e4deda7b82cae3dc2b04269cf0fba 100644 (file)
@@ -4556,16 +4556,24 @@ struct perf_read_data {
        int ret;
 };
 
+static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu);
+
 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
+       int local_cpu = smp_processor_id();
        u16 local_pkg, event_pkg;
 
        if ((unsigned)event_cpu >= nr_cpu_ids)
                return event_cpu;
 
-       if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-               int local_cpu = smp_processor_id();
+       if (event->group_caps & PERF_EV_CAP_READ_SCOPE) {
+               const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu);
+
+               if (cpumask && cpumask_test_cpu(local_cpu, cpumask))
+                       return local_cpu;
+       }
 
+       if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
                event_pkg = topology_physical_package_id(event_cpu);
                local_pkg = topology_physical_package_id(local_cpu);
 
@@ -11905,7 +11913,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
                                if (cpu >= nr_cpu_ids)
                                        ret = -ENODEV;
                                else
-                                       event->cpu = cpu;
+                                       event->event_caps |= PERF_EV_CAP_READ_SCOPE;
                        } else {
                                ret = -ENODEV;
                        }