git.ipfire.org Git - thirdparty/linux.git/commitdiff
x86/ftrace: Implement DYNAMIC_FTRACE_WITH_JMP
author: Menglong Dong <menglong8.dong@gmail.com>
Tue, 18 Nov 2025 12:36:30 +0000 (20:36 +0800)
committer: Alexei Starovoitov <ast@kernel.org>
Mon, 24 Nov 2025 17:46:37 +0000 (09:46 -0800)
Implement the DYNAMIC_FTRACE_WITH_JMP for x86_64. In ftrace_call_replace,
we will use JMP32_INSN_OPCODE instead of CALL_INSN_OPCODE if the address
should use "jmp".

Meanwhile, adjust the direct call in the ftrace_regs_caller. The RSB is
balanced in the "jmp" mode. Take the function "foo" for example:

 original_caller:
 call foo -> foo:
         call fentry -> fentry:
                 [do ftrace callbacks]
                 move tramp_addr to stack
                 RET -> tramp_addr
                         tramp_addr:
                         [..]
                         call foo_body -> foo_body:
                                 [..]
                                 RET -> back to tramp_addr
                         [..]
                         RET -> back to original_caller

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lore.kernel.org/r/20251118123639.688444-3-dongml2@chinatelecom.cn
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
arch/x86/Kconfig
arch/x86/kernel/ftrace.c
arch/x86/kernel/ftrace_64.S

index fa3b616af03a2d50eaf5f922bc8cd4e08a284045..462250a20311dc0f4e88bc229e1237928675bc3f 100644 (file)
@@ -230,6 +230,7 @@ config X86
        select HAVE_DYNAMIC_FTRACE_WITH_ARGS    if X86_64
        select HAVE_FTRACE_REGS_HAVING_PT_REGS  if X86_64
        select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+       select HAVE_DYNAMIC_FTRACE_WITH_JMP     if X86_64
        select HAVE_SAMPLE_FTRACE_DIRECT        if X86_64
        select HAVE_SAMPLE_FTRACE_DIRECT_MULTI  if X86_64
        select HAVE_EBPF_JIT
index 4450acec93903de10301686c6d261e7855c4bd77..0543b57f54ee4d10832d7769cdf9e51d6a7276e5 100644 (file)
@@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
         * No need to translate into a callthunk. The trampoline does
         * the depth accounting itself.
         */
-       return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+       if (ftrace_is_jmp(addr)) {
+               addr = ftrace_jmp_get(addr);
+               return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
+       } else {
+               return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+       }
 }
 
 static int ftrace_verify_code(unsigned long ip, const char *old_code)
index 823dbdd0eb41093f47ed71a19ba9b6a01bca5af4..a132608265f6c01cc23b20b2361c611043930793 100644 (file)
@@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        RET
 
+1:
+       testb   $1, %al
+       jz      2f
+       andq $0xfffffffffffffffe, %rax
+       movq %rax, MCOUNT_REG_SIZE+8(%rsp)
+       restore_mcount_regs
+       /* Restore flags */
+       popfq
+       RET
+
        /* Swap the flags with orig_rax */
-1:     movq MCOUNT_REG_SIZE(%rsp), %rdi
+2:     movq MCOUNT_REG_SIZE(%rsp), %rdi
        movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
        movq %rax, MCOUNT_REG_SIZE(%rsp)