perf/x86: Fix n_pair for cancelled txn
Author:     Sasha Levin <sashal@kernel.org>
AuthorDate: Tue, 13 Oct 2020 03:28:52 +0000 (23:28 -0400)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 29 Oct 2020 09:07:17 +0000 (10:07 +0100)
[ Upstream commit 871a93b0aad65a7f44ee25f2d17932ef6d559850 ]

Kan reported that n_metric gets corrupted for cancelled transactions;
a similar issue exists for n_pair for AMD's Large Increment thing.
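
The failure mode: collect_events() bumps n_pair for every Large
Increment event it collects into a transaction, but x86_pmu_cancel_txn()
only rolled back n_added and n_events, so a cancelled transaction left
its n_pair contribution behind. A minimal userspace sketch of the
accounting and the fix (cpu_hw_events below is a simplified stand-in
for the kernel structure, not the real thing):

  /* Minimal model of the accounting bug; compile with cc. */
  #include <assert.h>
  #include <stdio.h>

  struct cpu_hw_events {
      int n_events;   /* events currently collected */
      int n_pair;     /* events using a counter pair (Large Increment) */
      int n_txn;      /* events added by the current transaction */
      int n_txn_pair; /* pair events added by the current transaction */
  };

  static void start_txn(struct cpu_hw_events *cpuc)
  {
      cpuc->n_txn = 0;
      cpuc->n_txn_pair = 0; /* the fix: reset the per-txn pair count too */
  }

  static void collect_event(struct cpu_hw_events *cpuc, int is_pair)
  {
      cpuc->n_events++;
      cpuc->n_txn++;
      if (is_pair) {
          cpuc->n_pair++;
          cpuc->n_txn_pair++; /* the fix: remember pairs per txn */
      }
  }

  static void cancel_txn(struct cpu_hw_events *cpuc)
  {
      cpuc->n_events -= cpuc->n_txn;
      /*
       * Before the patch this line did not exist, so n_pair kept
       * the cancelled transaction's contribution forever.
       */
      cpuc->n_pair -= cpuc->n_txn_pair;
  }

  int main(void)
  {
      struct cpu_hw_events cpuc = { 0 };

      /* a transaction collects two pair events, then fails to schedule */
      start_txn(&cpuc);
      collect_event(&cpuc, 1);
      collect_event(&cpuc, 1);
      cancel_txn(&cpuc);

      /* with the fix nothing leaks; without it, n_pair would be 2 */
      assert(cpuc.n_pair == 0 && cpuc.n_events == 0);
      printf("after cancel: n_events=%d n_pair=%d\n",
             cpuc.n_events, cpuc.n_pair);
      return 0;
  }

Dropping the two n_txn_pair lines reproduces the pre-patch behaviour:
the assert fires because n_pair stays at 2 after the cancel.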

The problem was confirmed, and the fix verified, by Kim using:

  sudo perf stat -e "{cycles,cycles,cycles,cycles}:D" -a sleep 10 &

  # should succeed:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

  # should fail:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all,fp_ret_sse_avx_ops.all,cycles}:D" -a workload

  # previously failed, now succeeds with this patch:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload
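
Back-of-the-envelope counter math for the recipe, assuming six
general-purpose counters per core on the Zen test box and that each
Large Increment event (fp_ret_sse_avx_ops.all) occupies an even/odd
counter pair; both figures are assumptions inferred from the Large
Increment commit, not stated here:

  #include <stdio.h>

  #define NUM_COUNTERS 6

  /* plain events take one counter, Large Increment events a pair */
  static int counters_needed(int plain, int pair)
  {
      return plain + 2 * pair;
  }

  int main(void)
  {
      int background = counters_needed(4, 0); /* {cycles x4} held by the
                                                 backgrounded perf stat */

      /* {fp_ret_sse_avx_ops.all}: 4 + 2 = 6 <= 6, schedulable */
      printf("should succeed: %d/%d\n",
             background + counters_needed(0, 1), NUM_COUNTERS);

      /* {fp,fp,cycles}: 4 + 5 = 9 > 6, the txn is cancelled */
      printf("should fail:    %d/%d\n",
             background + counters_needed(1, 2), NUM_COUNTERS);

      /* same as the first case; only fits if the cancel above
         rolled n_pair back */
      printf("retry:          %d/%d\n",
             background + counters_needed(0, 1), NUM_COUNTERS);
      return 0;
  }

Without the patch, the cancelled nine-counter transaction leaves n_pair
at 2, which shrinks the scheduler's budget (gpmax is reduced by n_pair
when PMU_FL_PAIR is set) enough to reject the final, otherwise-fitting
group.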

Fixes: 5738891229a2 ("perf/x86/amd: Add support for Large Increment per Cycle Events")
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Kim Phillips <kim.phillips@amd.com>
Link: https://lkml.kernel.org/r/20201005082516.GG2628@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/events/core.c
arch/x86/events/perf_event.h

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 4103665c6e032f10067f186c884824ef5e330144..29640b4079af0199743374f0e8e5fa8d85b9235c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
                cpuc->event_list[n] = event;
                n++;
-               if (is_counter_pair(&event->hw))
+               if (is_counter_pair(&event->hw)) {
                        cpuc->n_pair++;
+                       cpuc->n_txn_pair++;
+               }
        }
        return n;
 }
@@ -1953,6 +1955,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 
        perf_pmu_disable(pmu);
        __this_cpu_write(cpu_hw_events.n_txn, 0);
+       __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
 }
 
 /*
@@ -1978,6 +1981,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
         */
        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
+       __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
        perf_pmu_enable(pmu);
 }
 
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index e17a3d8a47ede37fb4eddc48c2ba0159e919f0ed..d4d482d16fe1850d0b06982917e07cb61d1ea053 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -198,6 +198,7 @@ struct cpu_hw_events {
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
+       int                     n_txn_pair;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
 	u64			tags[X86_PMC_IDX_MAX];
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */