perf parse-events: Make X modifier more respectful of groups
Author:     Ian Rogers <irogers@google.com>
AuthorDate: Fri, 17 Oct 2025 23:03:57 +0000 (16:03 -0700)
Commit:     Namhyung Kim <namhyung@kernel.org>
CommitDate: Mon, 20 Oct 2025 04:05:37 +0000 (13:05 +0900)
Events with an X modifier were reordered within their group; for example,
slots was made the group leader in:
```
$ perf record -e '{cpu/mem-stores/ppu,cpu/slots/uX}' -- sleep 1
```

Fix this by making `dont_regroup` evsels always sort by their own index.
When fixing up the groups, make cur_leader the leader of the
`dont_regroup` evsel so that the `dont_regroup` evsel itself doesn't
become a leader.
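
Roughly, the comparator side of the change amounts to the standalone
sketch below (the struct layout, field names and demo values are
simplified assumptions for illustration, not the real evsel
definitions):
```
#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for perf's evsel state; names are illustrative. */
struct evsel {
	int idx;               /* position in the parsed event list */
	int leader_idx;        /* idx of the group leader (== idx when leading) */
	int nr_members;        /* group size (simplified: set on every member here) */
	bool dont_regroup;     /* set by the X modifier */
	bool must_be_in_group; /* e.g. Intel topdown events such as slots */
};

/* Sort-key selection mirroring the patched evlist__cmp() logic. */
static int sort_idx(const struct evsel *e, int force_grouped_idx)
{
	if (e->dont_regroup)
		return e->idx; /* X modifier: always keep the parsed position */
	if (force_grouped_idx != -1 && e->must_be_in_group)
		return force_grouped_idx; /* pull topdown events together */
	/* Group members otherwise sort by their leader's index. */
	if (e->leader_idx != e->idx || e->nr_members > 1)
		return e->leader_idx;
	return e->idx;
}

int main(void)
{
	/* {cpu/mem-stores/,cpu/slots/uX}: slots is topdown but carries X. */
	struct evsel mem_stores = { .idx = 0, .leader_idx = 0, .nr_members = 2 };
	struct evsel slots = { .idx = 1, .leader_idx = 0, .nr_members = 2,
			       .dont_regroup = true, .must_be_in_group = true };
	int force_grouped_idx = 5; /* arbitrary value, only to show the contrast */

	printf("mem-stores key=%d slots key=%d (was %d before the fix)\n",
	       sort_idx(&mem_stores, force_grouped_idx),
	       sort_idx(&slots, force_grouped_idx),
	       force_grouped_idx);
	return 0;
}
```
With the X modifier set, the sort key is always the evsel's own parsed
index, so sorting can no longer move it relative to its leader.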

On a Tigerlake, this patch corrects the grouping and the output meets expectations:
```
$ perf stat -e '{cpu/mem-stores/,cpu/slots/uX}' -a -- sleep 0.1

 Performance counter stats for 'system wide':

        83,458,652      cpu/mem-stores/
     2,720,854,880      cpu/slots/uX

       0.103780587 seconds time elapsed

$ perf stat -e 'slots,slots:X' -a -- sleep 0.1

 Performance counter stats for 'system wide':

       732,042,247      slots                (48.96%)
       643,288,155      slots:X              (51.04%)

       0.102731018 seconds time elapsed
```

Closes: https://lore.kernel.org/lkml/18f20d38-070c-4e17-bc90-cf7102e1e53d@linux.intel.com/
Fixes: 035c17893082 ("perf parse-events: Add 'X' modifier to exclude an event from being regrouped")
Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 3aec86aebdc6bc9d301ca1723dd90991ba944926..0c0dc20b1c1375f6f0ddda7213f2c2310a2bd462 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1973,14 +1973,18 @@ static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct li
         * event's index is used. An index may be forced for events that
         * must be in the same group, namely Intel topdown events.
         */
-       if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
+       if (lhs->dont_regroup) {
+               lhs_sort_idx = lhs_core->idx;
+       } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
                lhs_sort_idx = *force_grouped_idx;
        } else {
                bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
 
                lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
        }
-       if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
+       if (rhs->dont_regroup) {
+               rhs_sort_idx = rhs_core->idx;
+       } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
                rhs_sort_idx = *force_grouped_idx;
        } else {
                bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;
@@ -2078,10 +2082,10 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
         */
        idx = 0;
        list_for_each_entry(pos, list, core.node) {
-               const struct evsel *pos_leader = evsel__leader(pos);
+               struct evsel *pos_leader = evsel__leader(pos);
                const char *pos_pmu_name = pos->group_pmu_name;
                const char *cur_leader_pmu_name;
-               bool pos_force_grouped = force_grouped_idx != -1 &&
+               bool pos_force_grouped = force_grouped_idx != -1 && !pos->dont_regroup &&
                        arch_evsel__must_be_in_group(pos);
 
                /* Reset index and nr_members. */
@@ -2095,8 +2099,8 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
                 * groups can't span PMUs.
                 */
                if (!cur_leader || pos->dont_regroup) {
-                       cur_leader = pos;
-                       cur_leaders_grp = &pos->core;
+                       cur_leader = pos->dont_regroup ? pos_leader : pos;
+                       cur_leaders_grp = &cur_leader->core;
                        if (pos_force_grouped)
                                force_grouped_leader = pos;
                }