git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bpf, x86, riscv, arm: no_caller_saved_registers for bpf_get_smp_processor_id()
author: Eduard Zingerman <eddyz87@gmail.com>
Mon, 22 Jul 2024 23:38:37 +0000 (16:38 -0700)
committer: Andrii Nakryiko <andrii@kernel.org>
Mon, 29 Jul 2024 22:05:05 +0000 (15:05 -0700)
The function bpf_get_smp_processor_id() is processed in a different
way, depending on the arch:
- on x86 verifier replaces call to bpf_get_smp_processor_id() with a
  sequence of instructions that modify only r0;
- on riscv64 jit replaces call to bpf_get_smp_processor_id() with a
  sequence of instructions that modify only r0;
- on arm64 jit replaces call to bpf_get_smp_processor_id() with a
  sequence of instructions that modify only r0 and tmp registers.

These rewrites satisfy attribute no_caller_saved_registers contract.
Allow rewrite of no_caller_saved_registers patterns for
bpf_get_smp_processor_id() in order to use this function as a canary
for no_caller_saved_registers tests.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240722233844.1406874-4-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
kernel/bpf/helpers.c
kernel/bpf/verifier.c

index b5f0adae82933b79b6543483aec0f28723edd2ee..d02ae323996ba0f835626fd87b029c710d407aca 100644 (file)
@@ -158,6 +158,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
        .func           = bpf_get_smp_processor_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
+       .allow_nocsr    = true,
 };
 
 BPF_CALL_0(bpf_get_numa_node_id)
index 7587336967cc30a9bf88134997f3d09bcbec273f..df3be12096cf2415b9c602bbeb6846bd66703900 100644 (file)
@@ -16130,7 +16130,14 @@ static u32 helper_nocsr_clobber_mask(const struct bpf_func_proto *fn)
  */
 static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
 {
-       return false;
+       switch (imm) {
+#ifdef CONFIG_X86_64
+       case BPF_FUNC_get_smp_processor_id:
+               return env->prog->jit_requested && bpf_jit_supports_percpu_insn();
+#endif
+       default:
+               return false;
+       }
 }
 
 /* GCC and LLVM define a no_caller_saved_registers function attribute.
@@ -20834,7 +20841,7 @@ patch_map_ops_generic:
 #if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
                /* Implement bpf_get_smp_processor_id() inline. */
                if (insn->imm == BPF_FUNC_get_smp_processor_id &&
-                   prog->jit_requested && bpf_jit_supports_percpu_insn()) {
+                   verifier_inlines_helper_call(env, insn->imm)) {
                        /* BPF_FUNC_get_smp_processor_id inlining is an
                         * optimization, so if pcpu_hot.cpu_number is ever
                         * changed in some incompatible and hard to support