git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf, x86: Add support for signed arena loads
author: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Tue, 23 Sep 2025 11:01:49 +0000 (11:01 +0000)
committer: Alexei Starovoitov <ast@kernel.org>
Tue, 23 Sep 2025 19:00:22 +0000 (12:00 -0700)
Currently, signed load instructions into arena memory are unsupported.
The compiler is free to generate these, and on GCC-14 we see a
corresponding error when it happens. The hurdle in supporting them is
deciding which unused opcode to use to mark them for the JIT's own
consumption. After much thinking, it appears 0xc0 / BPF_NOSPEC can be
combined with load instructions to identify signed arena loads. Use
this to recognize and JIT them appropriately, and remove the verifier
side limitation on the program if the JIT supports them.

Co-developed-by: Puranjay Mohan <puranjay@kernel.org>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Link: https://lore.kernel.org/r/20250923110157.18326-2-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
arch/arm64/net/bpf_jit_comp.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp.c
include/linux/filter.h
kernel/bpf/verifier.c

index e36261c6395296babc2fea121e8375fe3a8590c1..796938b535cd1f17494b301ca0190a514a99db22 100644 (file)
@@ -3064,6 +3064,11 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
                if (!bpf_atomic_is_load_store(insn) &&
                    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
                        return false;
+               break;
+       case BPF_LDX | BPF_MEMSX | BPF_B:
+       case BPF_LDX | BPF_MEMSX | BPF_H:
+       case BPF_LDX | BPF_MEMSX | BPF_W:
+               return false;
        }
        return true;
 }
index 14d7aab61fcb36bf74ceea47857a243cf505d6a0..83672373d02640f5a014411867bf03335e68fcd6 100644 (file)
@@ -2066,6 +2066,11 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
                case BPF_STX | BPF_ATOMIC | BPF_DW:
                        if (insn->imm == BPF_CMPXCHG)
                                return rv_ext_enabled(ZACAS);
+                       break;
+               case BPF_LDX | BPF_MEMSX | BPF_B:
+               case BPF_LDX | BPF_MEMSX | BPF_H:
+               case BPF_LDX | BPF_MEMSX | BPF_W:
+                       return false;
                }
        }
 
index 8b57d8532f362ee8e23fb3117704f36b6cdcf252..cf461d76e9da32e3d5576de2f20deb37992ad149 100644 (file)
@@ -2967,6 +2967,11 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
        case BPF_STX | BPF_ATOMIC | BPF_DW:
                if (bpf_atomic_is_load_store(insn))
                        return false;
+               break;
+       case BPF_LDX | BPF_MEMSX | BPF_B:
+       case BPF_LDX | BPF_MEMSX | BPF_H:
+       case BPF_LDX | BPF_MEMSX | BPF_W:
+               return false;
        }
        return true;
 }
index 8d34a9400a5e49a49f1543d16b7afe2412d85913..fc13306af15fa849ca60df2d022da2bfeb8ed8fe 100644 (file)
@@ -1152,11 +1152,38 @@ static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 i
        *pprog = prog;
 }
 
+static void emit_ldsx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+{
+       u8 *prog = *pprog;
+
+       switch (size) {
+       case BPF_B:
+               /* movsx rax, byte ptr [rax + r12 + off] */
+               EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBE);
+               break;
+       case BPF_H:
+               /* movsx rax, word ptr [rax + r12 + off] */
+               EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBF);
+               break;
+       case BPF_W:
+               /* movsx rax, dword ptr [rax + r12 + off] */
+               EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x63);
+               break;
+       }
+       emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
+       *pprog = prog;
+}
+
 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 {
        emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
 }
 
+static void emit_ldsx_r12(u8 **prog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+       emit_ldsx_index(prog, size, dst_reg, src_reg, X86_REG_R12, off);
+}
+
 /* STX: *(u8*)(dst_reg + off) = src_reg */
 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 {
@@ -2109,15 +2136,22 @@ st:                     if (is_imm8(insn->off))
                case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
                case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
                case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+               case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
+               case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
+               case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
                case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
                case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
                case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
                case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
                        start_of_ldx = prog;
-                       if (BPF_CLASS(insn->code) == BPF_LDX)
-                               emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
-                       else
+                       if (BPF_CLASS(insn->code) == BPF_LDX) {
+                               if (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX)
+                                       emit_ldsx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+                               else
+                                       emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+                       } else {
                                emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+                       }
 populate_extable:
                        {
                                struct exception_table_entry *ex;
index 4241a885975fa1c7cc8f867df8853b3e8f8640b6..f5c859b8131a3e5fa5111b60cc291cedd44f096d 100644 (file)
@@ -78,6 +78,9 @@ struct ctl_table_header;
 /* unused opcode to mark special atomic instruction */
 #define BPF_PROBE_ATOMIC 0xe0
 
+/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
+#define BPF_PROBE_MEM32SX 0xc0
+
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS  0xe0
 
index ceeb0ffe7d67435e708804fce77991d2a0cf4709..1641992371761f14851ecab7ad906612008fabc8 100644 (file)
@@ -21379,10 +21379,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                        continue;
                case PTR_TO_ARENA:
                        if (BPF_MODE(insn->code) == BPF_MEMSX) {
-                               verbose(env, "sign extending loads from arena are not supported yet\n");
-                               return -EOPNOTSUPP;
+                               if (!bpf_jit_supports_insn(insn, true)) {
+                                       verbose(env, "sign extending loads from arena are not supported yet\n");
+                                       return -EOPNOTSUPP;
+                               }
+                               insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32SX | BPF_SIZE(insn->code);
+                       } else {
+                               insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
                        }
-                       insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
                        env->prog->aux->num_exentries++;
                        continue;
                default:
@@ -21588,6 +21592,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                        if (BPF_CLASS(insn->code) == BPF_LDX &&
                            (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
                             BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
+                            BPF_MODE(insn->code) == BPF_PROBE_MEM32SX ||
                             BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
                                num_exentries++;
                        if ((BPF_CLASS(insn->code) == BPF_STX ||