From: Puranjay Mohan
Date: Tue, 23 Sep 2025 11:01:50 +0000 (+0000)
Subject: bpf, arm64: Add support for signed arena loads
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=eab2a71f3a6a5c5f4d4acaffb962cb39c199421c;p=thirdparty%2Fkernel%2Fstable.git

bpf, arm64: Add support for signed arena loads

Add support for signed loads from the arena, which are internally
converted by the verifier to loads with mode set to BPF_PROBE_MEM32SX.
The implementation is similar to BPF_PROBE_MEMSX and BPF_MEMSX, but for
BPF_PROBE_MEM32SX the arena_vm_base is added to the src register to form
the address.

Signed-off-by: Puranjay Mohan
Link: https://lore.kernel.org/r/20250923110157.18326-3-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov
---

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 796938b535cd1..288d9cc73919f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1133,12 +1133,14 @@ static int add_exception_handler(const struct bpf_insn *insn,
 		return 0;
 
 	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
-	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
-	    BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
-	    BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
+	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEM32SX &&
+	    BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
 		return 0;
 
 	is_arena = (BPF_MODE(insn->code) == BPF_PROBE_MEM32) ||
+		   (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX) ||
 		   (BPF_MODE(insn->code) == BPF_PROBE_ATOMIC);
 
 	if (!ctx->prog->aux->extable ||
@@ -1659,7 +1661,11 @@ emit_cond_jmp:
 	case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
 	case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
 	case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
-		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+	case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
+	case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
+	case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
+		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
+		    BPF_MODE(insn->code) == BPF_PROBE_MEM32SX) {
 			emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
 			src = tmp2;
 		}
@@ -1671,7 +1677,8 @@ emit_cond_jmp:
 			off_adj = off;
 		}
 		sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX ||
-			       BPF_MODE(insn->code) == BPF_PROBE_MEMSX);
+			       BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
+			       BPF_MODE(insn->code) == BPF_PROBE_MEM32SX);
 		switch (BPF_SIZE(code)) {
 		case BPF_W:
 			if (is_lsi_offset(off_adj, 2)) {
@@ -1879,9 +1886,11 @@ emit_cond_jmp:
 		if (ret)
 			return ret;
 
-		ret = add_exception_handler(insn, ctx, dst);
-		if (ret)
-			return ret;
+		if (BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) {
+			ret = add_exception_handler(insn, ctx, dst);
+			if (ret)
+				return ret;
+		}
 		break;
 
 	default:
@@ -3064,11 +3073,6 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
 		if (!bpf_atomic_is_load_store(insn) &&
 		    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			return false;
-		break;
-	case BPF_LDX | BPF_MEMSX | BPF_B:
-	case BPF_LDX | BPF_MEMSX | BPF_H:
-	case BPF_LDX | BPF_MEMSX | BPF_W:
-		return false;
 	}
 	return true;
 }
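
For context (not part of the patch): a minimal BPF C sketch of a program that would exercise this path, modeled on the kernel selftests' arena programs. The __arena macro, the bpf_arena_alloc_pages() declaration, and the map/section names are assumptions borrowed from the selftests, not introduced by this change. Built with a cpu v4-capable clang, the signed dereference below becomes a BPF_MEMSX load, which the verifier rewrites to BPF_PROBE_MEM32SX for the arena pointer and which the arm64 JIT now accepts.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example, modeled on tools/testing/selftests/bpf arena progs. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Arena pointers live in clang address space 1 (selftests' __arena macro). */
#define __arena __attribute__((address_space(1)))

/* Arena page allocator kfunc, declared as in the selftests. */
void __arena *bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
				    int node_id, __u64 flags) __ksym __weak;

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 1);			/* one arena page */
} arena SEC(".maps");

long result;

SEC("syscall")
int signed_arena_load(void *ctx)
{
	int __arena *p;

	p = bpf_arena_alloc_pages(&arena, NULL, 1, -1 /* NUMA_NO_NODE */, 0);
	if (!p)
		return 0;

	*p = -13;
	/*
	 * Signed 32-bit load through an arena pointer: with -mcpu=v4, clang
	 * emits BPF_LDX | BPF_MEMSX | BPF_W, the verifier converts it to
	 * BPF_PROBE_MEM32SX, and the arm64 JIT adds arena_vm_base to the
	 * source register before emitting the sign-extending load.
	 */
	result = *p;		/* expected: -13 */
	return 0;
}

char _license[] SEC("license") = "GPL";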