From: Kumar Kartikeya Dwivedi
Date: Fri, 27 Feb 2026 00:57:25 +0000 (-0800)
Subject: selftests/bpf: Add tests for ctx fixed offset support
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=f6312e71759ddb10b20fbdb9ee01b9546cabd4e3;p=thirdparty%2Flinux.git

selftests/bpf: Add tests for ctx fixed offset support

Add tests to ensure PTR_TO_CTX supports fixed offsets for program types
that don't rewrite accesses to it. Ensure that variable offsets and
negative offsets are still rejected.

An extra test also checks writing into ctx with modified offset for
syscall progs. Other program types do not support writes (notably,
writable tracepoints offer a pointer for a writable buffer through ctx,
but don't allow writing to the ctx itself).

Before the fix made in the previous commit, these tests do not succeed,
except the ones testing for failures regardless of the change.

Signed-off-by: Kumar Kartikeya Dwivedi
Reviewed-by: Emil Tsalapatis
Link: https://lore.kernel.org/r/20260227005725.1247305-3-memxor@gmail.com
Signed-off-by: Alexei Starovoitov
---

diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index 5ebf7d9bcc55e..371780290c0da 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -292,4 +292,80 @@ padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2);
 __failure __msg("invalid bpf_context access")
 padding_access("sk_reuseport", sk_reuseport_md, hash, 4);
 
+SEC("syscall")
+__description("syscall: write to ctx with fixed offset")
+__success
+__naked void syscall_ctx_fixed_off_write(void)
+{
+	asm volatile ("					\
+	r0 = 0;						\
+	*(u32*)(r1 + 0) = r0;				\
+	r1 += 4;					\
+	*(u32*)(r1 + 0) = r0;				\
+	exit;						\
+"	::: __clobber_all);
+}
+
+/*
+ * Test that program types without convert_ctx_access can dereference
+ * their ctx pointer after adding a fixed offset. Variable and negative
+ * offsets should still be rejected.
+ */
+#define no_rewrite_ctx_access(type, name, off, ld_op)			\
+	SEC(type)							\
+	__description(type ": read ctx at fixed offset")		\
+	__success							\
+	__naked void no_rewrite_##name##_fixed(void)			\
+	{								\
+		asm volatile ("						\
+		r1 += %[__off];						\
+		r0 = *(" #ld_op " *)(r1 + 0);				\
+		r0 = 0;							\
+		exit;"							\
+		:							\
+		: __imm_const(__off, off)				\
+		: __clobber_all);					\
+	}								\
+	SEC(type)							\
+	__description(type ": reject variable offset ctx access")	\
+	__failure __msg("variable ctx access var_off=")			\
+	__naked void no_rewrite_##name##_var(void)			\
+	{								\
+		asm volatile ("						\
+		r6 = r1;						\
+		call %[bpf_get_prandom_u32];				\
+		r1 = r6;						\
+		r0 &= 4;						\
+		r1 += r0;						\
+		r0 = *(" #ld_op " *)(r1 + 0);				\
+		r0 = 0;							\
+		exit;"							\
+		:							\
+		: __imm(bpf_get_prandom_u32)				\
+		: __clobber_all);					\
+	}								\
+	SEC(type)							\
+	__description(type ": reject negative offset ctx access")	\
+	__failure __msg("negative offset ctx ptr")			\
+	__naked void no_rewrite_##name##_neg(void)			\
+	{								\
+		asm volatile ("						\
+		r1 += %[__neg_off];					\
+		r0 = *(" #ld_op " *)(r1 + 0);				\
+		r0 = 0;							\
+		exit;"							\
+		:							\
+		: __imm_const(__neg_off, -(off))			\
+		: __clobber_all);					\
+	}
+
+no_rewrite_ctx_access("syscall", syscall, 4, u32);
+no_rewrite_ctx_access("kprobe", kprobe, 8, u64);
+no_rewrite_ctx_access("tracepoint", tp, 8, u64);
+no_rewrite_ctx_access("raw_tp", raw_tp, 8, u64);
+no_rewrite_ctx_access("raw_tracepoint.w", raw_tp_w, 8, u64);
+no_rewrite_ctx_access("fentry/bpf_modify_return_test", fentry, 8, u64);
+no_rewrite_ctx_access("cgroup/dev", cgroup_dev, 4, u32);
+no_rewrite_ctx_access("netfilter", netfilter, offsetof(struct bpf_nf_ctx, skb), u64);
+
 char _license[] SEC("license") = "GPL";