selftests/bpf: validate async callback return value check correctness
author    Andrii Nakryiko <andrii@kernel.org>
          Sat, 2 Dec 2023 17:57:03 +0000 (09:57 -0800)
committer Alexei Starovoitov <ast@kernel.org>
          Sat, 2 Dec 2023 19:36:51 +0000 (11:36 -0800)
Adjust the timer/timer_ret_1 test to more carefully validate the verifier's
logic for enforcing the async callback return value. The test will pass only
if the return value is marked precise and read.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231202175705.885270-10-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
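
For context, the verifier requires an async (timer) callback to provably return 0,
which is the "[0, 0]" range referenced by the expected error message in the diff
below. A minimal sketch of a callback that satisfies this check, assuming the usual
selftest headers and the timer_map definition from this file (timer_cb_ret_ok is a
hypothetical name, not part of the patch), could look like:

static int timer_cb_ret_ok(void *map, int *key, struct bpf_timer *timer)
{
	/* Always returning 0 keeps r0 within the [0, 0] range the verifier
	 * enforces for async callbacks, so a callback like this would be
	 * accepted.
	 */
	return 0;
}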
tools/testing/selftests/bpf/progs/timer_failure.c

index 9000da1e2120ab18c31886f485487dc2a8cf70b9..9fbc69c77bbbf45d066f5a98f1cc3135e769ca7b 100644 (file)
@@ -21,17 +21,37 @@ struct {
        __type(value, struct elem);
 } timer_map SEC(".maps");
 
-static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+__naked __noinline __used
+static unsigned long timer_cb_ret_bad()
 {
-       if (bpf_get_smp_processor_id() % 2)
-               return 1;
-       else
-               return 0;
+       asm volatile (
+               "call %[bpf_get_prandom_u32];"
+               "if r0 s> 1000 goto 1f;"
+               "r0 = 0;"
+       "1:"
+               "goto +0;" /* checkpoint */
+               /* async callback is expected to return 0, so branch above
+                * skipping r0 = 0; should lead to a failure, but if exit
+                * instruction doesn't enforce r0's precision, this callback
+                * will be successfully verified
+                */
+               "exit;"
+               :
+               : __imm(bpf_get_prandom_u32)
+               : __clobber_common
+       );
 }
 
 SEC("fentry/bpf_fentry_test1")
-__failure __msg("should have been in [0, 0]")
-int BPF_PROG2(test_ret_1, int, a)
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 22: (b7) r0 = 0")
+/* check that branch code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 24: (85) call bpf_get_prandom_u32#7")
+__msg("should have been in [0, 0]")
+long BPF_PROG2(test_bad_ret, int, a)
 {
        int key = 0;
        struct bpf_timer *timer;
@@ -39,7 +59,7 @@ int BPF_PROG2(test_ret_1, int, a)
        timer = bpf_map_lookup_elem(&timer_map, &key);
        if (timer) {
                bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
-               bpf_timer_set_callback(timer, timer_cb_ret1);
+               bpf_timer_set_callback(timer, timer_cb_ret_bad);
                bpf_timer_start(timer, 1000, 0);
        }
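
To make the naked asm above easier to follow, here is a rough plain-C equivalent of
the bad callback (a hypothetical rendering, not part of the patch; the test itself
uses inline asm so that the exact instruction sequence, and therefore the
mark_precise log lines checked via __msg, stay stable):

static int timer_cb_ret_bad_c(void *map, int *key, struct bpf_timer *timer)
{
	long v = bpf_get_prandom_u32();

	/* When the random value exceeds 1000, the "r0 = 0" assignment is
	 * skipped and a nonzero value is returned; the verifier must catch
	 * this and reject the program with "should have been in [0, 0]".
	 */
	if (v > 1000)
		return v;
	return 0;
}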