drivers/perf: arm_spe: Use perf_allow_kernel() for permissions
author     James Clark <james.clark@linaro.org>
           Tue, 27 Aug 2024 14:51:12 +0000 (15:51 +0100)
committer  Will Deacon <will@kernel.org>
           Fri, 30 Aug 2024 10:42:24 +0000 (11:42 +0100)
Use perf_allow_kernel() for 'pa_enable' (physical addresses),
'pct_enable' (physical timestamps) and context IDs. This means that
perf_event_paranoid is now taken into account and LSM hooks can be used,
which is more consistent with other perf_event_open calls. For example,
PERF_SAMPLE_PHYS_ADDR uses perf_allow_kernel() rather than just
perfmon_capable().
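
Stated as a decision: the features above are now granted when perf_event_paranoid
is at most 1 or the caller has CAP_PERFMON (or CAP_SYS_ADMIN), and in either case
an LSM can still veto the open. A rough standalone sketch of that policy, for
illustration only (the real check is the perf_allow_kernel() definition moved
into kernel/events/core.c in this patch):

  /*
   * Illustration only, not kernel code: the policy decision that pa_enable,
   * pct_enable and context IDs are now subject to. The capability and LSM
   * results are passed in as plain parameters here.
   */
  #include <errno.h>
  #include <stdbool.h>

  static int allow_kernel_policy(int paranoid_level, bool has_perfmon_cap,
                                 int lsm_verdict)
  {
          /* paranoid <= 1, or CAP_PERFMON/CAP_SYS_ADMIN, passes the sysctl gate */
          if (paranoid_level > 1 && !has_perfmon_cap)
                  return -EACCES;

          /* the security_perf_event_open() hook then has the final say */
          return lsm_verdict;     /* 0 means allowed */
  }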

This also indirectly fixes the following error message which is
misleading because perf_event_paranoid is not taken into account by
perfmon_capable():

  $ perf record -e arm_spe/pa_enable/

  Error:
  Access to performance monitoring and observability operations is
  limited. Consider adjusting /proc/sys/kernel/perf_event_paranoid
  setting ...
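
The same denial can also be reproduced without the perf tool. Below is a minimal
reproducer sketch (not part of the patch): it assumes the PMU registered itself
as arm_spe_0 and reads the PMU type and the pa_enable bit position from sysfs
rather than hard-coding them; the remaining attr fields only roughly mirror what
perf record falls back to, so some systems may fail earlier with a different
errno.

  /* Hypothetical reproducer: open an SPE event with pa_enable=1 and report
   * the resulting errno. */
  #include <linux/perf_event.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static long read_sysfs_long(const char *path, const char *fmt)
  {
          long val = -1;
          FILE *f = fopen(path, "r");

          if (f) {
                  if (fscanf(f, fmt, &val) != 1)
                          val = -1;
                  fclose(f);
          }
          return val;
  }

  int main(void)
  {
          struct perf_event_attr attr = { 0 };
          long type, pa_bit;
          int fd;

          type = read_sysfs_long("/sys/bus/event_source/devices/arm_spe_0/type",
                                 "%ld");
          pa_bit = read_sysfs_long("/sys/bus/event_source/devices/arm_spe_0/format/pa_enable",
                                   "config:%ld");
          if (type < 0 || pa_bit < 0) {
                  fprintf(stderr, "arm_spe_0 PMU not found\n");
                  return 1;
          }

          attr.size = sizeof(attr);
          attr.type = type;
          attr.config = 1UL << pa_bit;     /* pa_enable=1 */
          attr.sample_period = 4096;       /* large enough for any SPE interval */
          attr.exclude_kernel = 1;         /* like perf's paranoid fallback */

          /* Profile the calling thread; perf_event_open has no glibc wrapper. */
          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0)
                  perror("perf_event_open");
          else
                  close(fd);

          return 0;
  }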

Suggested-by: Al Grant <al.grant@arm.com>
Signed-off-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20240827145113.1224604-1-james.clark@linaro.org
Link: https://lore.kernel.org/all/20240807120039.GD37996@noisy.programming.kicks-ass.net/
Signed-off-by: Will Deacon <will@kernel.org>
drivers/perf/arm_spe_pmu.c
include/linux/perf_event.h
kernel/events/core.c

diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 9100d82bfabc0d5f99216f05befd713d9b67f483..3569050f9cf375fecf14053b9a983e75f31bbabc 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -41,7 +41,7 @@
 
 /*
  * Cache if the event is allowed to trace Context information.
- * This allows us to perform the check, i.e, perfmon_capable(),
+ * This allows us to perform the check, i.e, perf_allow_kernel(),
  * in the context of the event owner, once, during the event_init().
  */
 #define SPE_PMU_HW_FLAGS_CX                    0x00001
@@ -50,7 +50,7 @@ static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);
 
 static void set_spe_event_has_cx(struct perf_event *event)
 {
-       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr))
                event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
 }
 
@@ -745,9 +745,8 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
 
        set_spe_event_has_cx(event);
        reg = arm_spe_event_to_pmscr(event);
-       if (!perfmon_capable() &&
-           (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
-               return -EACCES;
+       if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
+               return perf_allow_kernel(&event->attr);
 
        return 0;
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a8942277ddad258ee6c54681b6aafd54317c094..e336306b8c08e8fc490074c4c962d635bfe2c076 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1602,13 +1602,7 @@ static inline int perf_is_paranoid(void)
        return sysctl_perf_event_paranoid > -1;
 }
 
-static inline int perf_allow_kernel(struct perf_event_attr *attr)
-{
-       if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
-               return -EACCES;
-
-       return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
-}
+int perf_allow_kernel(struct perf_event_attr *attr);
 
 static inline int perf_allow_cpu(struct perf_event_attr *attr)
 {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index aa3450bdc2276cd47d749ac9631600f7963ecfa9..ae7d63c0c593b10d396b3afb3b87fb9ee8d1c39f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -13351,6 +13351,15 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
        return &event->attr;
 }
 
+int perf_allow_kernel(struct perf_event_attr *attr)
+{
+       if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
+               return -EACCES;
+
+       return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
+}
+EXPORT_SYMBOL_GPL(perf_allow_kernel);
+
 /*
  * Inherit an event from parent task to child task.
  *
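
The helper is moved out of line and exported with EXPORT_SYMBOL_GPL (rather than
left as a static inline in perf_event.h) presumably so that modular PMU drivers
such as arm_spe_pmu can link against it: the symbols its body touches,
sysctl_perf_event_paranoid and security_perf_event_open(), are not available to
modules. A minimal sketch of a modular driver using the exported helper in the
same way as the SPE change above (the driver name and feature bit are made up):

  /*
   * Hypothetical modular PMU driver following the same pattern: gate a
   * privileged configuration bit on perf_allow_kernel() in .event_init, so
   * that perf_event_paranoid and LSM policy are both honoured with the event
   * owner's credentials.
   */
  #include <linux/bits.h>
  #include <linux/perf_event.h>

  #define EXAMPLE_PMU_PRIV_CONFIG  BIT(0)  /* made-up privileged feature */

  static int example_pmu_event_init(struct perf_event *event)
  {
          if (event->attr.config & EXAMPLE_PMU_PRIV_CONFIG) {
                  int ret = perf_allow_kernel(&event->attr);

                  if (ret)
                          return ret;      /* -EACCES or an LSM-provided error */
          }

          /* ... remaining driver-specific validation and setup ... */
          return 0;
  }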