perf/x86/amd: Fix AMD BRS period adjustment
author     Peter Zijlstra <peterz@infradead.org>
           Tue, 10 May 2022 19:22:04 +0000 (21:22 +0200)
committer  Peter Zijlstra <peterz@infradead.org>
           Tue, 17 May 2022 22:08:25 +0000 (00:08 +0200)
There are two problems with the current amd_brs_adjust_period() code:

 - it isn't in fact AMD specific and will always adjust the period;

 - it adjusts the period, while it should only adjust the event count,
   resulting in reporting a short period.

Fix this by using x86_pmu.limit_period; this makes it specific to the
AMD BRS case and ensures only the event count is adjusted, while the
reported period is unmodified.

Fixes: ba2fe7500845 ("perf/x86/amd: Add AMD branch sampling period adjustment")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
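
The mechanism of the fix, in short: x86_perf_event_set_period() computes
"left", the count programmed into the hardware counter, from the event's
period state. Below is a minimal sketch of the relevant flow; the function
name is made up for illustration, and the real upstream function also
handles negative/zero period_left, max_period clamping and counter
wraparound:

	static int set_period_sketch(struct perf_event *event)
	{
		struct hw_perf_event *hwc = &event->hw;
		s64 left = local64_read(&hwc->period_left);

		/*
		 * The removed hunk below shrank "period" (and with it
		 * hwc->last_period, the value reported with each sample)
		 * for every event with a branch stack, on every vendor.
		 *
		 * The limit_period hook instead only clips "left", the
		 * value programmed into the counter, so the reported
		 * period stays what the user asked for.
		 */
		if (x86_pmu.limit_period)
			left = x86_pmu.limit_period(event, left);

		/* The counter counts up and interrupts on overflow,
		 * so program it with -left. */
		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

		return 0;
	}
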
arch/x86/events/amd/core.c
arch/x86/events/core.c
arch/x86/events/perf_event.h

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index d81eac2284eadfaf5d5f4959a2c9ed1a66aa5e38..3eee59c64daa525b4c14ad6c6d57cd9f1aada224 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1255,6 +1255,18 @@ static void amd_pmu_sched_task(struct perf_event_context *ctx,
                amd_pmu_brs_sched_task(ctx, sched_in);
 }
 
+static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+{
+       /*
+        * Decrease period by the depth of the BRS feature to get the last N
+        * taken branches and approximate the desired period
+        */
+       if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
+               left -= x86_pmu.lbr_nr;
+
+       return left;
+}
+
 static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = amd_pmu_handle_irq,
@@ -1415,6 +1427,7 @@ static int __init amd_core_pmu_init(void)
        if (boot_cpu_data.x86 >= 0x19 && !amd_brs_init()) {
                x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
                x86_pmu.sched_task = amd_pmu_sched_task;
+               x86_pmu.limit_period = amd_pmu_limit_period;
                /*
                 * put_event_constraints callback same as Fam17h, set above
                 */
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index b08052b05db684a20fbca6e455a66c1c9bac23c7..30788894124f0e265ddda6fd4b76b8ea1c82b3fd 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1374,13 +1374,6 @@ int x86_perf_event_set_period(struct perf_event *event)
            x86_pmu.set_topdown_event_period)
                return x86_pmu.set_topdown_event_period(event);
 
-       /*
-        * decrease period by the depth of the BRS feature to get
-        * the last N taken branches and approximate the desired period
-        */
-       if (has_branch_stack(event))
-               period = amd_brs_adjust_period(period);
-
        /*
         * If we are way outside a reasonable range then just skip forward:
         */
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 3b0324584da3bbb3a9991d4421c19569fafaeae4..21a5482bcf8458c29a62fb6670a4eadaa4aaa683 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1254,14 +1254,6 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
 }
 
 void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in);
-
-static inline s64 amd_brs_adjust_period(s64 period)
-{
-       if (period > x86_pmu.lbr_nr)
-               return period - x86_pmu.lbr_nr;
-
-       return period;
-}
 #else
 static inline int amd_brs_init(void)
 {
@@ -1290,11 +1282,6 @@ static inline void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool s
 {
 }
 
-static inline s64 amd_brs_adjust_period(s64 period)
-{
-       return period;
-}
-
 static inline void amd_brs_enable_all(void)
 {
 }
@@ -1324,11 +1311,6 @@ static inline void amd_brs_enable_all(void)
 static inline void amd_brs_disable_all(void)
 {
 }
-
-static inline s64 amd_brs_adjust_period(s64 period)
-{
-       return period;
-}
 #endif /* CONFIG_CPU_SUP_AMD */
 
 static inline int is_pebs_pt(struct perf_event *event)
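
For concreteness, a worked example, under the assumption that BRS is 16
entries deep (i.e. x86_pmu.lbr_nr == 16, as set up by amd_brs_init() on
Zen 3) and that the user asked for a sampling period of 100000:

	/*
	 * perf record -b -c 100000 ...        => sample_period = 100000
	 *
	 * Before this fix:
	 *   period = 100000 - 16 = 99984      => counter programmed with 99984
	 *                                      => samples *reported* with 99984
	 *
	 * After this fix:
	 *   left = limit_period(event, 100000) = 99984
	 *                                      => counter programmed with 99984
	 *                                      => samples still report 100000
	 */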