The control knobs set before loading BPF programs should be declared as
'const volatile' so that they can be optimized by the BPF core: libbpf
places such globals in the frozen .rodata section, where the verifier can
treat them as known constants and eliminate dead code.
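
As a reference for the pattern, here is a minimal loader-side sketch (the
"sample" skeleton and function names are hypothetical, not the actual
bperf_cgroup code): 'const volatile' globals are written via the skeleton's
rodata, which is only writable between open() and load(); libbpf then
freezes the backing map, so the verifier sees true constants.

  #include "sample.skel.h"        /* hypothetical skeleton header */

  static int load_with_knobs(void)
  {
          struct sample_bpf *skel = sample_bpf__open();

          if (!skel)
                  return -1;

          /* rodata can only be written between __open() and __load() */
          skel->rodata->use_cgroup_v2 = 1;

          if (sample_bpf__load(skel)) {
                  sample_bpf__destroy(skel);
                  return -1;
          }

          /* from here on the knob is a frozen, read-only constant */
          return 0;
  }
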
Committer testing:
root@x1:~# perf stat --bpf-counters -e cpu_core/cycles/,cpu_core/instructions/ sleep 1

 Performance counter stats for 'sleep 1':

         2,442,583      cpu_core/cycles/
         2,494,425      cpu_core/instructions/

       1.002687372 seconds time elapsed

       0.001126000 seconds user
       0.001166000 seconds sys
root@x1:~# perf trace -e bpf --max-events 10 perf stat --bpf-counters -e cpu_core/cycles/,cpu_core/instructions/ sleep 1
     0.000 ( 0.019 ms): perf/2944119 bpf(cmd: OBJ_GET, uattr: 0x7fffdf5cdd40, size: 20) = 5
     0.021 ( 0.002 ms): perf/2944119 bpf(cmd: OBJ_GET_INFO_BY_FD, uattr: 0x7fffdf5cdcd0, size: 16) = 0
     0.030 ( 0.005 ms): perf/2944119 bpf(cmd: MAP_LOOKUP_ELEM, uattr: 0x7fffdf5ceda0, size: 32) = 0
     0.037 ( 0.004 ms): perf/2944119 bpf(cmd: LINK_GET_FD_BY_ID, uattr: 0x7fffdf5ced80, size: 12) = -1 ENOENT (No such file or directory)
     0.189 ( 0.004 ms): perf/2944119 bpf(cmd: 36, uattr: 0x7fffdf5cec10, size: 8) = -1 EOPNOTSUPP (Operation not supported)
     0.201 ( 0.095 ms): perf/2944119 bpf(cmd: PROG_LOAD, uattr: 0x7fffdf5ce940, size: 148) = 10
     0.305 ( 0.026 ms): perf/2944119 bpf(cmd: PROG_LOAD, uattr: 0x7fffdf5cea00, size: 148) = 10
     0.347 ( 0.012 ms): perf/2944119 bpf(cmd: BTF_LOAD, uattr: 0x7fffdf5ce8e0, size: 40) = 10
     0.364 ( 0.004 ms): perf/2944119 bpf(cmd: BTF_LOAD, uattr: 0x7fffdf5ce950, size: 40) = 10
     0.376 ( 0.006 ms): perf/2944119 bpf(cmd: BTF_LOAD, uattr: 0x7fffdf5ce730, size: 40) = 10
root@x1:~#
 Performance counter stats for 'sleep 1':

           271,221      cpu_core/cycles/
           139,150      cpu_core/instructions/

       1.002881677 seconds time elapsed

       0.001318000 seconds user
       0.001314000 seconds sys
root@x1:~#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240902200515.2103769-2-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 	skel->rodata->num_cpus = total_cpus;
 	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
 
+	if (cgroup_is_v2("perf_event") > 0)
+		skel->rodata->use_cgroup_v2 = 1;
+
 	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
 
 	/* we need one copy of events per cpu for reading */
 		goto out;
 	}
 
-	if (cgroup_is_v2("perf_event") > 0)
-		skel->bss->use_cgroup_v2 = 1;
-
 	err = -1;
 
 	cgrp_switch = evsel__new(&cgrp_switch_attr);
 const volatile __u32 num_events = 1;
 const volatile __u32 num_cpus = 1;
+const volatile int use_cgroup_v2 = 0;
 
 int enabled = 0;
-int use_cgroup_v2 = 0;
 int perf_subsys_id = -1;
 
 static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
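
For completeness, a sketch of what the BPF side gains (this helper is
illustrative, pieced together from the declarations above, not the exact
bperf_cgroup code): since use_cgroup_v2 now lives frozen in .rodata rather
than .bss, the verifier knows its value at load time and can remove the
untaken branch as dead code instead of emitting a runtime load.

  static inline __u64 get_cgroup_id(int level)
  {
          struct task_struct *p = (void *)bpf_get_current_task();

          /* constant at verification time: one side of this branch is
           * eliminated from the loaded program */
          if (use_cgroup_v2)
                  return bpf_get_current_ancestor_cgroup_id(level);

          return get_cgroup_v1_ancestor_id(BPF_CORE_READ(p, cgroups,
                          subsys[perf_subsys_id], cgroup), level);
  }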