git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bpf, x86: allow indirect jumps to r8...r15
author: Anton Protopopov <a.s.protopopov@gmail.com>
Wed, 5 Nov 2025 09:04:05 +0000 (09:04 +0000)
committer: Alexei Starovoitov <ast@kernel.org>
Thu, 6 Nov 2025 01:53:22 +0000 (17:53 -0800)
Currently the emit_indirect_jump() function only accepts one of the
RAX, RCX, ..., RBP registers as the destination. Make it accept
R8, R9, ..., R15 as well, and make callers pass BPF registers rather
than native registers. This is required to enable indirect jump
support in eBPF.

Signed-off-by: Anton Protopopov <a.s.protopopov@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20251105090410.1250500-8-a.s.protopopov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
arch/x86/net/bpf_jit_comp.c

index 91f92d65ae83702e5e175f75a5ea7a458c348d4d..bbd2b03d2b74c29ecab7829b2210b36bb52d21cc 100644 (file)
@@ -660,24 +660,38 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 
 #define EMIT_LFENCE()  EMIT3(0x0F, 0xAE, 0xE8)
 
-static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
+static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
 {
        u8 *prog = *pprog;
 
+       if (ereg)
+               EMIT1(0x41);
+
+       EMIT2(0xFF, 0xE0 + reg);
+
+       *pprog = prog;
+}
+
+static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
+{
+       u8 *prog = *pprog;
+       int reg = reg2hex[bpf_reg];
+       bool ereg = is_ereg(bpf_reg);
+
        if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
                OPTIMIZER_HIDE_VAR(reg);
-               emit_jump(&prog, its_static_thunk(reg), ip);
+               emit_jump(&prog, its_static_thunk(reg + 8*ereg), ip);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
                EMIT_LFENCE();
-               EMIT2(0xFF, 0xE0 + reg);
+               __emit_indirect_jump(&prog, reg, ereg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
                OPTIMIZER_HIDE_VAR(reg);
                if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
-                       emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
+                       emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
                else
-                       emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
+                       emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
        } else {
-               EMIT2(0xFF, 0xE0 + reg);        /* jmp *%\reg */
+               __emit_indirect_jump(&prog, reg, ereg);
                if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
                        EMIT1(0xCC);            /* int3 */
        }
@@ -797,7 +811,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
         * rdi == ctx (1st arg)
         * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
         */
-       emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
+       emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));
 
        /* out: */
        ctx->tail_call_indirect_label = prog - start;
@@ -3543,7 +3557,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image,
                if (err)
                        return err;
 
-               emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
+               emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
 
                *pprog = prog;
                return 0;