perf stat: Read tool events last
author	Ian Rogers <irogers@google.com>
	Tue, 18 Nov 2025 21:13:24 +0000 (13:13 -0800)
committer	Namhyung Kim <namhyung@kernel.org>
	Wed, 19 Nov 2025 04:32:41 +0000 (20:32 -0800)
When reading a metric like memory bandwidth on multiple sockets, the
additional sockets will be on CPUs > 0. Because the counters are read
in affinity order, the events on CPU 0, including the time, are read
first, and the later sockets are read afterwards. This can make the
later sockets report a bandwidth larger than is possible for the
measured period of time. To avoid this, move the reading of tool
events so that it occurs after all other events have been read.

Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
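
To make the race concrete, here is a minimal standalone C sketch (not
perf code; the numbers and variable names are illustrative assumptions)
showing why reading the duration_time tool event before a remote
socket's counter can produce an impossible bandwidth, while reading it
last cannot:

	#include <stdio.h>

	int main(void)
	{
		double bw_true = 100.0;  /* true bandwidth: 100 bytes per time unit */
		double duration = 10.0;  /* duration_time as read on CPU 0 */
		double skew = 0.5;       /* delay before the remote socket is read */

		/* Old order: duration_time is read on CPU 0 first, so the
		 * remote socket's counter keeps accumulating for 'skew'
		 * longer than the recorded duration, and the computed rate
		 * exceeds bw_true. */
		double socket1_bytes = bw_true * (duration + skew);
		printf("time read first: %.1f bytes/unit (impossible, true max %.1f)\n",
		       socket1_bytes / duration, bw_true);

		/* New order: duration_time is read last, so it covers at
		 * least as much time as any counter it is divided by, and
		 * the rate can only be under-, never over-estimated. */
		printf("time read last:  %.1f bytes/unit\n",
		       socket1_bytes / (duration + skew));
		return 0;
	}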
tools/perf/builtin-stat.c

index ca1c80c141b65879a37d8778d933ddfbd4ffc064..5c06e9b61821d95a696459a0cfb1eb183eca86cb 100644
@@ -366,7 +366,7 @@ static int read_counter_cpu(struct evsel *counter, int cpu_map_idx)
        return 0;
 }
 
-static int read_affinity_counters(void)
+static int read_counters_with_affinity(void)
 {
        struct evlist_cpu_iterator evlist_cpu_itr;
        struct affinity saved_affinity, *affinity;
@@ -387,6 +387,9 @@ static int read_affinity_counters(void)
                if (evsel__is_bpf(counter))
                        continue;
 
+               if (evsel__is_tool(counter))
+                       continue;
+
                if (!counter->err)
                        counter->err = read_counter_cpu(counter, evlist_cpu_itr.cpu_map_idx);
        }
@@ -412,16 +415,46 @@ static int read_bpf_map_counters(void)
        return 0;
 }
 
-static int read_counters(void)
+static int read_tool_counters(void)
 {
-       if (!stat_config.stop_read_counter) {
-               if (read_bpf_map_counters() ||
-                   read_affinity_counters())
-                       return -1;
+       struct evsel *counter;
+
+       evlist__for_each_entry(evsel_list, counter) {
+               int idx;
+
+               if (!evsel__is_tool(counter))
+                       continue;
+
+               perf_cpu_map__for_each_idx(idx, counter->core.cpus) {
+                       if (!counter->err)
+                               counter->err = read_counter_cpu(counter, idx);
+               }
        }
        return 0;
 }
 
+static int read_counters(void)
+{
+       int ret;
+
+       if (stat_config.stop_read_counter)
+               return 0;
+
+       // Read all BPF counters first.
+       ret = read_bpf_map_counters();
+       if (ret)
+               return ret;
+
+       // Read non-BPF and non-tool counters next.
+       ret = read_counters_with_affinity();
+       if (ret)
+               return ret;
+
+       // Read the tool counters last. This way the duration_time counter
+       // should always be greater than any other counter's enabled time.
+       return read_tool_counters();
+}
+
 static void process_counters(void)
 {
        struct evsel *counter;