selftests/bpf: Improve reliability of test_perf_branches_no_hw()
author Matt Bobrowski <mattbobrowski@google.com>
Wed, 19 Nov 2025 14:35:40 +0000 (14:35 +0000)
committer Alexei Starovoitov <ast@kernel.org>
Sat, 22 Nov 2025 00:49:16 +0000 (16:49 -0800)
Currently, test_perf_branches_no_hw() relies on the busy loop within
test_perf_branches_common() being slow enough to allow at least one
perf event sample tick to occur before the backing perf event BPF
program is torn down. With a relatively small fixed iteration count
of 1,000,000, this is not guaranteed on modern fast CPUs, causing the
test run to subsequently fail with the following:

bpf_testmod.ko is already unloaded.
Loading bpf_testmod.ko...
Successfully loaded bpf_testmod.ko.
test_perf_branches_common:PASS:test_perf_branches_load 0 nsec
test_perf_branches_common:PASS:attach_perf_event 0 nsec
test_perf_branches_common:PASS:set_affinity 0 nsec
check_good_sample:PASS:output not valid 0 nsec
check_good_sample:PASS:read_branches_size 0 nsec
check_good_sample:PASS:read_branches_stack 0 nsec
check_good_sample:PASS:read_branches_stack 0 nsec
check_good_sample:PASS:read_branches_global 0 nsec
check_good_sample:PASS:read_branches_global 0 nsec
check_good_sample:PASS:read_branches_size 0 nsec
test_perf_branches_no_hw:PASS:perf_event_open 0 nsec
test_perf_branches_common:PASS:test_perf_branches_load 0 nsec
test_perf_branches_common:PASS:attach_perf_event 0 nsec
test_perf_branches_common:PASS:set_affinity 0 nsec
check_bad_sample:FAIL:output not valid no valid sample from prog
Summary: 0/1 PASSED, 0 SKIPPED, 1 FAILED
Successfully unloaded bpf_testmod.ko.

On a modern CPU (e.g. one with a 3.5 GHz clock rate), executing 1
million increments of a volatile integer can take significantly less
than 1 millisecond. If the spin loop completes and the perf event BPF
program is detached before the first 1 ms sampling interval elapses,
the perf event never fires. Fix this by bumping the loop iteration
count within test_perf_branches_common(), and by adding another loop
termination condition that is directly driven by the backing perf
event BPF program having executed. Notably, a conscious decision was
made not to adjust the sample_freq value, as that is simply not a
reliable way to fix the problem; it effectively leaves the race
window open.
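
As a rough sanity check of the timing claim (illustrative only, not
part of this patch), a standalone program along the following lines
can measure how long 1 million volatile increments take on a given
machine; on a fast CPU the reported time may come in well under the
1 ms sampling interval:

  #include <stdio.h>
  #include <time.h>

  int main(void)
  {
          volatile int j = 0;
          struct timespec start, end;
          long ns;
          int i;

          clock_gettime(CLOCK_MONOTONIC, &start);
          for (i = 0; i < 1000000; ++i)
                  ++j;
          clock_gettime(CLOCK_MONOTONIC, &end);

          ns = (end.tv_sec - start.tv_sec) * 1000000000L +
               (end.tv_nsec - start.tv_nsec);
          printf("1e6 volatile increments took %ld ns\n", ns);
          return 0;
  }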

Fixes: 67306f84ca78c ("selftests/bpf: Add bpf_read_branch_records() selftest")
Signed-off-by: Matt Bobrowski <mattbobrowski@google.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20251119143540.2911424-1-mattbobrowski@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/perf_branches.c
tools/testing/selftests/bpf/progs/test_perf_branches.c

index 06c7986131d96fb5bc81783399d944d84881bc82..0a7ef770c487c8bdd05bcde507a9b573e7d8944e 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
@@ -15,6 +15,10 @@ static void check_good_sample(struct test_perf_branches *skel)
        int pbe_size = sizeof(struct perf_branch_entry);
        int duration = 0;
 
+       if (CHECK(!skel->bss->run_cnt, "invalid run_cnt",
+                 "checked sample validity before prog run"))
+               return;
+
        if (CHECK(!skel->bss->valid, "output not valid",
                 "no valid sample from prog"))
                return;
@@ -45,6 +49,10 @@ static void check_bad_sample(struct test_perf_branches *skel)
        int written_stack = skel->bss->written_stack_out;
        int duration = 0;
 
+       if (CHECK(!skel->bss->run_cnt, "invalid run_cnt",
+                 "checked sample validity before prog run"))
+               return;
+
        if (CHECK(!skel->bss->valid, "output not valid",
                 "no valid sample from prog"))
                return;
@@ -83,8 +91,12 @@ static void test_perf_branches_common(int perf_fd,
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
        if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
                goto out_destroy;
-       /* spin the loop for a while (random high number) */
-       for (i = 0; i < 1000000; ++i)
+
+       /* Spin the loop for a while by using a high iteration count, and by
+        * checking whether the specific run count marker has been explicitly
+        * incremented at least once by the backing perf_event BPF program.
+        */
+       for (i = 0; i < 100000000 && !*(volatile int *)&skel->bss->run_cnt; ++i)
                ++j;
 
        test_perf_branches__detach(skel);
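
The volatile cast on run_cnt in the new loop condition matters:
without it, the compiler would be free to load run_cnt once and hoist
the read out of the loop, so the update made by the perf event BPF
program might never be observed. Below is a minimal sketch of the
same spin-until-observed pattern, using a SIGALRM handler as a
stand-in for the BPF program (hypothetical names, not from this
patch):

  #include <signal.h>
  #include <unistd.h>

  static volatile sig_atomic_t hits;

  static void on_alarm(int sig) { hits++; }

  /* Spin until the handler has run at least once, or give up after a
   * bounded number of iterations, mirroring the loop above. */
  static int wait_for_hit(void)
  {
          long i;

          signal(SIGALRM, on_alarm);
          alarm(1);
          for (i = 0; i < 100000000L && !hits; i++)
                  ;
          return hits > 0;
  }

  int main(void)
  {
          return wait_for_hit() ? 0 : 1;
  }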
index a1ccc831c882f6c104090069aa867ea5f3e0cedb..05ac9410cd68c95c4d87e0e686fe6995c35d1457 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_branches.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_branches.c
@@ -8,6 +8,7 @@
 #include <bpf/bpf_tracing.h>
 
 int valid = 0;
+int run_cnt = 0;
 int required_size_out = 0;
 int written_stack_out = 0;
 int written_global_out = 0;
@@ -24,6 +25,8 @@ int perf_branches(void *ctx)
        __u64 entries[4 * 3] = {0};
        int required_size, written_stack, written_global;
 
+       ++run_cnt;
+
        /* write to stack */
        written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
        /* ignore spurious events */
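
For context on why the userspace side can poll run_cnt directly:
libbpf skeletons memory-map the BPF object's global variable
sections, so skel->bss->run_cnt aliases the very memory the BPF
program increments. A hedged sketch of such a check, assuming the
generated test_perf_branches skeleton is already loaded and attached
(prog_has_run() is a hypothetical helper, not part of this patch):

  #include <stdbool.h>
  #include "test_perf_branches.skel.h"

  static bool prog_has_run(struct test_perf_branches *skel)
  {
          /* skel->bss points at the memory-mapped .bss of the BPF
           * object, so this read observes updates made by the
           * perf_branches BPF program as they happen. */
          return *(volatile int *)&skel->bss->run_cnt > 0;
  }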