git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
perf evsel: Improve falling back from cycles
authorIan Rogers <irogers@google.com>
Wed, 18 Mar 2026 23:45:56 +0000 (16:45 -0700)
committerNamhyung Kim <namhyung@kernel.org>
Thu, 19 Mar 2026 21:42:45 +0000 (14:42 -0700)
Switch to using evsel__match rather than comparing perf_event_attr
values, this is robust on hybrid architectures.
Ensure evsel->pmu matches the evsel->core.attr.
Remove exclude bits that get set in other fallback attempts when
switching the event.
Log the event name with modifiers when switching the event on fallback.

Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Thomas Richter <tmricht@linux.ibm.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
tools/perf/util/evsel.c
tools/perf/util/evsel.h

index f59228c1a39eb2bc00f816c07cb0f45dc1ca11b8..bd14d9bbc91f005b5fc26869ec1fff0237973bac 100644 (file)
@@ -3785,25 +3785,42 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
 {
        int paranoid;
 
-       if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
-           evsel->core.attr.type   == PERF_TYPE_HARDWARE &&
-           evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+       if ((err == ENODEV || err == ENOENT || err == ENXIO) &&
+           evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
                /*
-                * If it's cycles then fall back to hrtimer based cpu-clock sw
-                * counter, which is always available even if no PMU support.
-                *
-                * PPC returns ENXIO until 2.6.37 (behavior changed with commit
-                * b0a873e).
+                * If the legacy hardware cycles event fails then fall back
+                * to hrtimer based cpu-clock sw counter, which is always
+                * available even if no PMU support. PPC returned ENXIO rather
+                * than ENODEV or ENOENT until 2.6.37.
                 */
-               evsel->core.attr.type   = PERF_TYPE_SOFTWARE;
+               evsel->pmu = perf_pmus__find_by_type(PERF_TYPE_SOFTWARE);
+               assert(evsel->pmu); /* The software PMU is a well-known type, so the lookup can't fail. */
+
+               /* Configure the event. */
+               evsel->core.attr.type = PERF_TYPE_SOFTWARE;
                evsel->core.attr.config = target__has_cpu(target)
                        ? PERF_COUNT_SW_CPU_CLOCK
                        : PERF_COUNT_SW_TASK_CLOCK;
-               scnprintf(msg, msgsize,
-                       "The cycles event is not supported, trying to fall back to %s",
-                       target__has_cpu(target) ? "cpu-clock" : "task-clock");
+               evsel->core.is_pmu_core = false;
+
+               /* Remove excludes for new event. */
+               if (evsel->fallenback_eacces) {
+                       evsel->core.attr.exclude_kernel = 0;
+                       evsel->core.attr.exclude_hv     = 0;
+                       evsel->fallenback_eacces = false;
+               }
+               if (evsel->fallenback_eopnotsupp) {
+                       evsel->core.attr.exclude_guest = 0;
+                       evsel->fallenback_eopnotsupp = false;
+               }
 
+               /* Name is recomputed by evsel__name. */
                zfree(&evsel->name);
+
+               /* Log message. */
+               scnprintf(msg, msgsize,
+                         "The cycles event is not supported, trying to fall back to %s",
+                         evsel__name(evsel));
                return true;
        } else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
                   (paranoid = perf_event_paranoid()) > 1) {
@@ -3830,7 +3847,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
                          " samples", paranoid);
                evsel->core.attr.exclude_kernel = 1;
                evsel->core.attr.exclude_hv     = 1;
-
+               evsel->fallenback_eacces = true;
                return true;
        } else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest &&
                   !evsel->exclude_GH) {
@@ -3851,7 +3868,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
                /* Apple M1 requires exclude_guest */
                scnprintf(msg, msgsize, "Trying to fall back to excluding guest samples");
                evsel->core.attr.exclude_guest = 1;
-
+               evsel->fallenback_eopnotsupp = true;
                return true;
        }
 no_fallback:
index a3d754c029a0243078db94ec8494eb49d563db1b..97f57fab28ce9e8461cb347e31f652116ee2ef9c 100644 (file)
@@ -124,6 +124,8 @@ struct evsel {
        bool                    default_metricgroup; /* A member of the Default metricgroup */
        bool                    default_show_events; /* If a default group member, show the event */
        bool                    needs_uniquify;
+       bool                    fallenback_eacces;
+       bool                    fallenback_eopnotsupp;
        struct hashmap          *per_pkg_mask;
        int                     err;
        int                     script_output_type;