bpf, arm64: Add support for signed arena loads
author     Puranjay Mohan <puranjay@kernel.org>
           Tue, 23 Sep 2025 11:01:50 +0000 (11:01 +0000)
committer  Alexei Starovoitov <ast@kernel.org>
           Tue, 23 Sep 2025 19:00:22 +0000 (12:00 -0700)
Add support for signed loads from arena, which are internally converted
to loads with mode set to BPF_PROBE_MEM32SX by the verifier. The
implementation is similar to BPF_PROBE_MEMSX and BPF_MEMSX, but for
BPF_PROBE_MEM32SX, arena_vm_base is added to the src register to form
the address.
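
For illustration, a rough semantic sketch of what a
BPF_LDX | BPF_PROBE_MEM32SX | BPF_W load does after this change (the
names below stand for the values held in the corresponding registers
and the insn offset; this is not literal JIT output):

	/* add arena_vm_base to src to form the address, then do a
	 * sign-extending 32-bit load into dst */
	dst = (s64)*(s32 *)(src + arena_vm_base + off);

Concretely, the JIT emits an A64_ADD of arena_vm_base into a temporary
followed by a sign-extending load (LDRSB/LDRSH/LDRSW depending on the
size), and add_exception_handler() creates an exception table entry so
a faulting access is handled like the other arena modes.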

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Link: https://lore.kernel.org/r/20250923110157.18326-3-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 796938b535cd1f17494b301ca0190a514a99db22..288d9cc73919fff249baea53368c9de969aff2e8 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1133,12 +1133,14 @@ static int add_exception_handler(const struct bpf_insn *insn,
                return 0;
 
        if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
-               BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
-                       BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
-                               BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
+           BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+           BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
+           BPF_MODE(insn->code) != BPF_PROBE_MEM32SX &&
+           BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
                return 0;
 
        is_arena = (BPF_MODE(insn->code) == BPF_PROBE_MEM32) ||
+                  (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX) ||
                   (BPF_MODE(insn->code) == BPF_PROBE_ATOMIC);
 
        if (!ctx->prog->aux->extable ||
@@ -1659,7 +1661,11 @@ emit_cond_jmp:
        case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
        case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
        case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
-               if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+       case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
+       case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
+       case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
+               if (BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
+                   BPF_MODE(insn->code) == BPF_PROBE_MEM32SX) {
                        emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
                        src = tmp2;
                }
@@ -1671,7 +1677,8 @@ emit_cond_jmp:
                        off_adj = off;
                }
                sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX ||
-                               BPF_MODE(insn->code) == BPF_PROBE_MEMSX);
+                               BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
+                                BPF_MODE(insn->code) == BPF_PROBE_MEM32SX);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        if (is_lsi_offset(off_adj, 2)) {
@@ -1879,9 +1886,11 @@ emit_cond_jmp:
                if (ret)
                        return ret;
 
-               ret = add_exception_handler(insn, ctx, dst);
-               if (ret)
-                       return ret;
+               if (BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) {
+                       ret = add_exception_handler(insn, ctx, dst);
+                       if (ret)
+                               return ret;
+               }
                break;
 
        default:
@@ -3064,11 +3073,6 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
                if (!bpf_atomic_is_load_store(insn) &&
                    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
                        return false;
-               break;
-       case BPF_LDX | BPF_MEMSX | BPF_B:
-       case BPF_LDX | BPF_MEMSX | BPF_H:
-       case BPF_LDX | BPF_MEMSX | BPF_W:
-               return false;
        }
        return true;
 }
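
As a usage illustration (an assumption following the arena selftest
conventions, not part of this patch; the function name is hypothetical),
a BPF C fragment whose dereference ends up as BPF_PROBE_MEM32SX could
look like:

	/* __arena marks arena pointers through the address_space(1)
	 * attribute, as in the selftests' bpf_arena_common.h. */
	#define __arena __attribute__((address_space(1)))

	/* With a v4-capable compiler the dereference becomes a
	 * sign-extending 32-bit load (BPF_MEMSX), which the verifier
	 * converts to BPF_PROBE_MEM32SX because p is an arena pointer. */
	long read_s32(int __arena *p)
	{
		return *p;
	}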