git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
selftests/bpf: Test accesses to ctx padding
author: Paul Chaignon <paul.chaignon@gmail.com>
Wed, 17 Sep 2025 08:10:53 +0000 (10:10 +0200)
committer: Daniel Borkmann <daniel@iogearbox.net>
Wed, 17 Sep 2025 14:15:57 +0000 (16:15 +0200)
This patch adds tests covering the various paddings in ctx structures.
In case of sk_lookup BPF programs, the behavior is a bit different
because accesses to the padding are explicitly allowed. Other cases
result in a clear reject from the verifier.

Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/3dc5f025e350aeb2bb1c257b87c577518e574aeb.1758094761.git.paul.chaignon@gmail.com
tools/testing/selftests/bpf/progs/verifier_ctx.c

index b927906aa305a95d312b141d3b7ecee701edde76..5ebf7d9bcc55e1b0062fb78e40845b6dbccfb98e 100644 (file)
@@ -262,4 +262,34 @@ narrow_load("sockops", bpf_sock_ops, skb_hwtstamp);
 unaligned_access("flow_dissector", __sk_buff, data);
 unaligned_access("netfilter", bpf_nf_ctx, skb);
 
+/* Generate a naked BPF program of type @type that performs a @sz-byte
+ * load from @ctx at offsetofend(@ctx, @prev_field), i.e. the first byte
+ * of the padding that follows @prev_field. On entry r1 holds the ctx
+ * pointer, so the load targets the padding bytes inside the context
+ * structure; whether the verifier accepts this depends on the program
+ * type (see the __success/__failure annotations at the use sites).
+ */
+#define padding_access(type, ctx, prev_field, sz)                      \
+       SEC(type)                                                       \
+       __description("access on " #ctx " padding after " #prev_field)  \
+       __naked void padding_ctx_access_##ctx(void)                     \
+       {                                                               \
+               asm volatile ("                                         \
+               r1 = *(u%[size] *)(r1 + %[off]);                        \
+               r0 = 0;                                                 \
+               exit;"                                                  \
+               :                                                       \
+               : __imm_const(size, sz * 8),                            \
+                 __imm_const(off, offsetofend(struct ctx, prev_field)) \
+               : __clobber_all);                                       \
+       }
+
+/* Accesses to ctx padding are rejected by the verifier with
+ * "invalid bpf_context access" for most program types; sk_lookup is the
+ * exception, where padding accesses are explicitly allowed, hence the
+ * single __success case below.
+ */
+__failure __msg("invalid bpf_context access")
+padding_access("cgroup/bind4", bpf_sock_addr, msg_src_ip6[3], 4);
+
+__success
+padding_access("sk_lookup", bpf_sk_lookup, remote_port, 2);
+
+__failure __msg("invalid bpf_context access")
+padding_access("tc", __sk_buff, tstamp_type, 2);
+
+__failure __msg("invalid bpf_context access")
+padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2);
+
+__failure __msg("invalid bpf_context access")
+padding_access("sk_reuseport", sk_reuseport_md, hash, 4);
+
 char _license[] SEC("license") = "GPL";