git.ipfire.org Git - thirdparty/linux.git/commitdiff
perf/arch: Record sample last_period before updating on the x86 and PowerPC platforms
author: Mark Barnett <mark.barnett@arm.com>
Tue, 8 Apr 2025 17:15:26 +0000 (18:15 +0100)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 9 Apr 2025 11:45:08 +0000 (13:45 +0200)
This change alters the PowerPC and x86 driver implementations to record
the last sample period before the event is updated for the next period.

A common pattern in PMU driver implementations is to have a
"*_event_set_period" function which takes care of updating the various
period-related fields in a perf_event structure. In most cases, the
drivers choose to call this function after initializing a sample data
structure with perf_sample_data_init. The x86 and PowerPC drivers
deviate from this, choosing to update the period before initializing the
sample data. When using an event with an alternate sample period, this
causes an incorrect period to be written to the sample data that gets
reported to userspace.

Signed-off-by: Mark Barnett <mark.barnett@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250408171530.140858-2-mark.barnett@arm.com
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/core-fsl-emb.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/knc.c

index b906d28f74fd4e5e13c2465b480e4e363ea1762c..42ff4d167acc5a0a2713e7af14d6f94b3f362633 100644 (file)
@@ -2239,6 +2239,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs)
 {
        u64 period = event->hw.sample_period;
+       const u64 last_period = event->hw.last_period;
        s64 prev, delta, left;
        int record = 0;
 
@@ -2320,7 +2321,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        if (record) {
                struct perf_sample_data data;
 
-               perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
+               perf_sample_data_init(&data, ~0ULL, last_period);
 
                if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE)
                        perf_get_data_addr(event, regs, &data.addr);
index 1a53ab08447cbe13b3d30b1d02f25ca2ccc5bb93..d2ffcc7021c5180810f03ffe68049eafd8bd16a4 100644 (file)
@@ -590,6 +590,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs)
 {
        u64 period = event->hw.sample_period;
+       const u64 last_period = event->hw.last_period;
        s64 prev, delta, left;
        int record = 0;
 
@@ -632,7 +633,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        if (record) {
                struct perf_sample_data data;
 
-               perf_sample_data_init(&data, 0, event->hw.last_period);
+               perf_sample_data_init(&data, 0, last_period);
 
                if (perf_event_overflow(event, &data, regs))
                        fsl_emb_pmu_stop(event, 0);
index f53ae1fd986f10f66c3ccf9e42e549f41a47ef42..cae213296a6327e15218587f0dad5365e90ea78c 100644 (file)
@@ -1684,6 +1684,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        int idx, handled = 0;
+       u64 last_period;
        u64 val;
 
        cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1703,6 +1704,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
                        continue;
 
                event = cpuc->events[idx];
+               last_period = event->hw.last_period;
 
                val = static_call(x86_pmu_update)(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
@@ -1716,7 +1718,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
                if (!static_call(x86_pmu_set_period)(event))
                        continue;
 
-               perf_sample_data_init(&data, 0, event->hw.last_period);
+               perf_sample_data_init(&data, 0, last_period);
 
                perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
 
index 3152a018c5028b2e7b9b0d17d1d9e90e3bf9091a..0ceaa1b07019b1c847910806ceeafcd90653344a 100644 (file)
@@ -3223,6 +3223,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
+               u64 last_period;
 
                handled++;
 
@@ -3250,10 +3251,12 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
                if (is_pebs_counter_event_group(event))
                        x86_pmu.drain_pebs(regs, &data);
 
+               last_period = event->hw.last_period;
+
                if (!intel_pmu_save_and_restart(event))
                        continue;
 
-               perf_sample_data_init(&data, 0, event->hw.last_period);
+               perf_sample_data_init(&data, 0, last_period);
 
                if (has_branch_stack(event))
                        intel_pmu_lbr_save_brstack(&data, cpuc, event);
index 034a1f6a457c6985e44efd2a800db85cc562555b..3e8ec049b46d15d4c100420090a07acbae054984 100644 (file)
@@ -241,16 +241,18 @@ again:
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
+               u64 last_period;
 
                handled++;
 
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
+               last_period = event->hw.last_period;
                if (!intel_pmu_save_and_restart(event))
                        continue;
 
-               perf_sample_data_init(&data, 0, event->hw.last_period);
+               perf_sample_data_init(&data, 0, last_period);
 
                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);