LoongArch: BPF: Enable trampoline-based tracing for module functions
author    Chenghao Duan <duanchenghao@kylinos.cn>
          Wed, 31 Dec 2025 07:19:21 +0000 (15:19 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 8 Jan 2026 09:17:13 +0000 (10:17 +0100)
commit 26138762d9a27a7f1c33f467c4123c600f64a36e upstream.

Remove the previous restriction that blocked tracing of kernel module
functions, and fix the issue that previously caused kernel lockups when
attempting to trace them.

Before entering the trampoline code, the return address register ra
holds the address of the instruction that follows the 'bl trampoline'
instruction, i.e. the point at which execution resumes inside the
traced function, while register t0 holds the parent function's return
address. Refine the trampoline return logic so that the register
contents remain correct both when returning to the traced function and
when returning to the parent function.
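
As an illustration of this convention, a rough sketch of the patched
function entry (not verbatim kernel output; the 'resume' label and the
exact instruction sequence are illustrative only):

	traced_func:
		move	t0, ra		# t0 = parent's return address
		bl	trampoline	# ra = address of the next insn
	resume:				# trampoline jumps back here (via t1),
		...			# with ra restored from t0 so the
					# traced function returns to its parent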

Before this patch was applied, the module_attach test in selftests/bpf
deadlocked: after the trampoline executed, it jumped to an incorrect
address, resulting in an infinite loop within the module function.
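
For reference, the failing case can be reproduced with the standard
selftests/bpf runner (a sketch of the usual workflow, assuming the
bpf_testmod test module is available on the target system):

	cd tools/testing/selftests/bpf
	./test_progs -t module_attach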

Cc: stable@vger.kernel.org
Fixes: 677e6123e3d2 ("LoongArch: BPF: Disable trampoline for kernel module function trace")
Signed-off-by: Chenghao Duan <duanchenghao@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/loongarch/net/bpf_jit.c

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 1d09c0d8082e16a922c593786fdac6dbf771d8fe..39753ce2d017e9efc36bf58ecd0539e23102cab2 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1284,7 +1284,7 @@ static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
                return 0;
        }
 
-       return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
+       return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_RA : LOONGARCH_GPR_ZERO, (u64)target);
 }
 
 static int emit_call(struct jit_ctx *ctx, u64 addr)
@@ -1638,14 +1638,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 
        /* To traced function */
        /* Ftrace jump skips 2 NOP instructions */
-       if (is_kernel_text((unsigned long)orig_call))
+       if (is_kernel_text((unsigned long)orig_call) ||
+           is_module_text_address((unsigned long)orig_call))
                orig_call += LOONGARCH_FENTRY_NBYTES;
        /* Direct jump skips 5 NOP instructions */
        else if (is_bpf_text_address((unsigned long)orig_call))
                orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
-       /* Module tracing not supported - cause kernel lockups */
-       else if (is_module_text_address((unsigned long)orig_call))
-               return -ENOTSUPP;
 
        if (flags & BPF_TRAMP_F_CALL_ORIG) {
                move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
@@ -1738,12 +1736,16 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
                emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
                emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
 
-               if (flags & BPF_TRAMP_F_SKIP_FRAME)
+               if (flags & BPF_TRAMP_F_SKIP_FRAME) {
                        /* return to parent function */
-                       emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
-               else
-                       /* return to traced function */
+                       move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
                        emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
+               } else {
+                       /* return to traced function */
+                       move_reg(ctx, LOONGARCH_GPR_T1, LOONGARCH_GPR_RA);
+                       move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
+                       emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T1, 0);
+               }
        }
 
        ret = ctx->idx;