]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
fgraph: Pass ftrace_regs to entryfunc
authorMasami Hiramatsu (Google) <mhiramat@kernel.org>
Thu, 26 Dec 2024 05:11:40 +0000 (14:11 +0900)
committerSteven Rostedt (Google) <rostedt@goodmis.org>
Thu, 26 Dec 2024 15:50:02 +0000 (10:50 -0500)
Pass ftrace_regs to the fgraph_ops::entryfunc(). If ftrace_regs is not
available, a NULL is passed instead. The user callback function can access
some registers (including the return address) via this ftrace_regs.

Note that the ftrace_regs can be NULL when the arch does NOT define:
HAVE_DYNAMIC_FTRACE_WITH_ARGS or HAVE_DYNAMIC_FTRACE_WITH_REGS.
More specifically, if HAVE_DYNAMIC_FTRACE_WITH_REGS is defined but
HAVE_DYNAMIC_FTRACE_WITH_ARGS is not, and the ftrace ops used to
register the function callback does not set FTRACE_OPS_FL_SAVE_REGS,
then ftrace_regs can be NULL in the user callback.

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Naveen N Rao <naveen@kernel.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://lore.kernel.org/173518990044.391279.17406984900626078579.stgit@devnote2
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
14 files changed:
arch/arm64/kernel/ftrace.c
arch/loongarch/kernel/ftrace_dyn.c
arch/powerpc/kernel/trace/ftrace.c
arch/powerpc/kernel/trace/ftrace_64_pg.c
arch/riscv/kernel/ftrace.c
arch/x86/kernel/ftrace.c
include/linux/ftrace.h
kernel/trace/fgraph.c
kernel/trace/ftrace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c

index 245cb419ca24da68279ec94b233dffae8ae5a853..570c38be833c13bc1704702039b0a49902df20eb 100644 (file)
@@ -481,7 +481,20 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
-       prepare_ftrace_return(ip, &arch_ftrace_regs(fregs)->lr, arch_ftrace_regs(fregs)->fp);
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       unsigned long frame_pointer = arch_ftrace_regs(fregs)->fp;
+       unsigned long *parent = &arch_ftrace_regs(fregs)->lr;
+       unsigned long old;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+
+       if (!function_graph_enter_regs(old, ip, frame_pointer,
+                                      (void *)frame_pointer, fregs)) {
+               *parent = return_hooker;
+       }
 }
 #else
 /*
index 18056229e22e4d41d3a63d0e7d1674f81d539594..25c9a4cfd5fa9284201d8624f37acd1944a05969 100644 (file)
@@ -243,8 +243,16 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 {
        struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
        unsigned long *parent = (unsigned long *)&regs->regs[1];
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       unsigned long old;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
 
-       prepare_ftrace_return(ip, (unsigned long *)parent);
+       if (!function_graph_enter_regs(old, ip, 0, parent, fregs))
+               *parent = return_hooker;
 }
 #else
 static int ftrace_modify_graph_caller(bool enable)
index e41daf2c4a3157e853a4c84535138ff147c9ccca..2f776f137a89ecfe83f060e62c689ed46ee6b56f 100644 (file)
@@ -665,7 +665,7 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
 
-       if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
+       if (!function_graph_enter_regs(parent_ip, ip, 0, (unsigned long *)sp, fregs))
                parent_ip = ppc_function_entry(return_to_handler);
 
 out:
index 8fb860b90ae1c4028c564aa071a6cb96771b0895..ac35015f04c6ad2dd3eaeab88c20bc0a8d4a3663 100644 (file)
@@ -787,7 +787,8 @@ int ftrace_disable_ftrace_graph_caller(void)
  * in current thread info. Return the address we want to divert to.
  */
 static unsigned long
-__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
+__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp,
+                       struct ftrace_regs *fregs)
 {
        unsigned long return_hooker;
 
@@ -799,7 +800,7 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
 
        return_hooker = ppc_function_entry(return_to_handler);
 
-       if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
+       if (!function_graph_enter_regs(parent, ip, 0, (unsigned long *)sp, fregs))
                parent = return_hooker;
 
 out:
@@ -810,13 +811,14 @@ out:
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
-       arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip, arch_ftrace_regs(fregs)->regs.gpr[1]);
+       arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip,
+                                               arch_ftrace_regs(fregs)->regs.gpr[1], fregs);
 }
 #else
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
                                    unsigned long sp)
 {
-       return __prepare_ftrace_return(parent, ip, sp);
+       return __prepare_ftrace_return(parent, ip, sp, NULL);
 }
 #endif
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
index 8cb9b211611d914181b2e9c94a2b083c8d5ab186..3524db5e4fa014a4594465f849d898a030bfb7b8 100644 (file)
@@ -214,7 +214,22 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
-       prepare_ftrace_return(&arch_ftrace_regs(fregs)->ra, ip, arch_ftrace_regs(fregs)->s0);
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       unsigned long frame_pointer = arch_ftrace_regs(fregs)->s0;
+       unsigned long *parent = &arch_ftrace_regs(fregs)->ra;
+       unsigned long old;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       /*
+        * We don't suffer access faults, so no extra fault-recovery assembly
+        * is needed here.
+        */
+       old = *parent;
+
+       if (!function_graph_enter_regs(old, ip, frame_pointer, parent, fregs))
+               *parent = return_hooker;
 }
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
 extern void ftrace_graph_call(void);
index 33f50c80f4812f4b817d954963b58a1c191acef3..166bc0ea3bdff9d949600682fd01c2eb727696f9 100644 (file)
@@ -607,15 +607,8 @@ int ftrace_disable_ftrace_graph_caller(void)
 }
 #endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
 
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info.
- */
-void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
-                          unsigned long frame_pointer)
+static inline bool skip_ftrace_return(void)
 {
-       unsigned long return_hooker = (unsigned long)&return_to_handler;
-
        /*
         * When resuming from suspend-to-ram, this function can be indirectly
         * called from early CPU startup code while the CPU is in real mode,
@@ -625,13 +618,27 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
         * This check isn't as accurate as virt_addr_valid(), but it should be
         * good enough for this purpose, and it's fast.
         */
-       if (unlikely((long)__builtin_frame_address(0) >= 0))
-               return;
+       if ((long)__builtin_frame_address(0) >= 0)
+               return true;
 
-       if (unlikely(ftrace_graph_is_dead()))
-               return;
+       if (ftrace_graph_is_dead())
+               return true;
+
+       if (atomic_read(&current->tracing_graph_pause))
+               return true;
+       return false;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
 
-       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+       if (unlikely(skip_ftrace_return()))
                return;
 
        if (!function_graph_enter(*parent, ip, frame_pointer, parent))
@@ -644,8 +651,15 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 {
        struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
        unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       unsigned long *parent = (unsigned long *)stack;
+
+       if (unlikely(skip_ftrace_return()))
+               return;
+
 
-       prepare_ftrace_return(ip, (unsigned long *)stack, 0);
+       if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
+               *parent = return_hooker;
 }
 #endif
 
index aa9ddd1e4bb6ab14c2131cb6a9d64551d2aba708..c86ac786da3d6a5e57df0c3e44c1b1759c9c0a9e 100644 (file)
@@ -1071,10 +1071,12 @@ struct fgraph_ops;
 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
                                       struct fgraph_ops *); /* return */
 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
-                                     struct fgraph_ops *); /* entry */
+                                     struct fgraph_ops *,
+                                     struct ftrace_regs *); /* entry */
 
 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
-                                  struct fgraph_ops *gops);
+                                  struct fgraph_ops *gops,
+                                  struct ftrace_regs *fregs);
 bool ftrace_pids_enabled(struct ftrace_ops *ops);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -1114,8 +1116,15 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 extern int
-function_graph_enter(unsigned long ret, unsigned long func,
-                    unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter_regs(unsigned long ret, unsigned long func,
+                         unsigned long frame_pointer, unsigned long *retp,
+                         struct ftrace_regs *fregs);
+
+static inline int function_graph_enter(unsigned long ret, unsigned long func,
+                                      unsigned long fp, unsigned long *retp)
+{
+       return function_graph_enter_regs(ret, func, fp, retp, NULL);
+}
 
 struct ftrace_ret_stack *
 ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
index 5c68d61091192338a9f6e943ed67a4ff889244d0..4791fd704e28ab658c5f78f0cf04f35159549aee 100644 (file)
@@ -292,7 +292,8 @@ static inline unsigned long make_data_type_val(int idx, int size, int offset)
 }
 
 /* ftrace_graph_entry set to this to tell some archs to run function graph */
-static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops)
+static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops,
+                    struct ftrace_regs *fregs)
 {
        return 0;
 }
@@ -520,7 +521,8 @@ int __weak ftrace_disable_ftrace_graph_caller(void)
 #endif
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
-                           struct fgraph_ops *gops)
+                           struct fgraph_ops *gops,
+                           struct ftrace_regs *fregs)
 {
        return 0;
 }
@@ -644,8 +646,9 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
 #endif
 
 /* If the caller does not use ftrace, call this function. */
-int function_graph_enter(unsigned long ret, unsigned long func,
-                        unsigned long frame_pointer, unsigned long *retp)
+int function_graph_enter_regs(unsigned long ret, unsigned long func,
+                             unsigned long frame_pointer, unsigned long *retp,
+                             struct ftrace_regs *fregs)
 {
        struct ftrace_graph_ent trace;
        unsigned long bitmap = 0;
@@ -668,7 +671,7 @@ int function_graph_enter(unsigned long ret, unsigned long func,
        if (static_branch_likely(&fgraph_do_direct)) {
                int save_curr_ret_stack = current->curr_ret_stack;
 
-               if (static_call(fgraph_func)(&trace, fgraph_direct_gops))
+               if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs))
                        bitmap |= BIT(fgraph_direct_gops->idx);
                else
                        /* Clear out any saved storage */
@@ -686,7 +689,7 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 
                        save_curr_ret_stack = current->curr_ret_stack;
                        if (ftrace_ops_test(&gops->ops, func, NULL) &&
-                           gops->entryfunc(&trace, gops))
+                           gops->entryfunc(&trace, gops, fregs))
                                bitmap |= BIT(i);
                        else
                                /* Clear out any saved storage */
@@ -1180,7 +1183,8 @@ void ftrace_graph_exit_task(struct task_struct *t)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 static int fgraph_pid_func(struct ftrace_graph_ent *trace,
-                          struct fgraph_ops *gops)
+                          struct fgraph_ops *gops,
+                          struct ftrace_regs *fregs)
 {
        struct trace_array *tr = gops->ops.private;
        int pid;
@@ -1194,7 +1198,7 @@ static int fgraph_pid_func(struct ftrace_graph_ent *trace,
                        return 0;
        }
 
-       return gops->saved_func(trace, gops);
+       return gops->saved_func(trace, gops, fregs);
 }
 
 void fgraph_update_pid_func(void)
index 6ebc76bafd38851bfefd9a50e6dd94ceff27c42a..ae29e1c4177d99ffbf174a2a4d625af7a65063b8 100644 (file)
@@ -819,7 +819,8 @@ struct profile_fgraph_data {
 };
 
 static int profile_graph_entry(struct ftrace_graph_ent *trace,
-                              struct fgraph_ops *gops)
+                              struct fgraph_ops *gops,
+                              struct ftrace_regs *fregs)
 {
        struct profile_fgraph_data *profile_data;
 
index 9691b47b5f3da297598f00736bab697487f9a201..0f38f36a5a8a93bf1bdc255c4d6926e017476c96 100644 (file)
@@ -694,7 +694,8 @@ void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 
 void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
-int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
+int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
+                     struct ftrace_regs *fregs);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
index f513603d7df97fc58058ed3f51f4656752be0b6c..676cf3e38f516edfbbdea18cc797537d01e39037 100644 (file)
@@ -175,7 +175,8 @@ struct fgraph_times {
 };
 
 int trace_graph_entry(struct ftrace_graph_ent *trace,
-                     struct fgraph_ops *gops)
+                     struct fgraph_ops *gops,
+                     struct ftrace_regs *fregs)
 {
        unsigned long *task_var = fgraph_get_task_var(gops);
        struct trace_array *tr = gops->private;
index fce064e205706f50e3b64f345b53c0f23bb226ee..ad739d76fc8621189cee05826fac2faf1862b224 100644 (file)
@@ -176,7 +176,8 @@ static int irqsoff_display_graph(struct trace_array *tr, int set)
 }
 
 static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
-                              struct fgraph_ops *gops)
+                              struct fgraph_ops *gops,
+                              struct ftrace_regs *fregs)
 {
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
index d6c7f18daa15abbeb3cc368946aba76b000e8df8..0d9e1075d815f1a801ce5f995d1054a97a748273 100644 (file)
@@ -113,7 +113,8 @@ static int wakeup_display_graph(struct trace_array *tr, int set)
 }
 
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
-                             struct fgraph_ops *gops)
+                             struct fgraph_ops *gops,
+                             struct ftrace_regs *fregs)
 {
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
index 38b5754790c95e7a7cb2fe63eb1e65027e56e4ee..f54493f8783d2cb5ce80164cfb5ad05a0ddac712 100644 (file)
@@ -774,7 +774,8 @@ struct fgraph_fixture {
 };
 
 static __init int store_entry(struct ftrace_graph_ent *trace,
-                             struct fgraph_ops *gops)
+                             struct fgraph_ops *gops,
+                             struct ftrace_regs *fregs)
 {
        struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
        const char *type = fixture->store_type_name;
@@ -1025,7 +1026,8 @@ static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
 static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
-                                     struct fgraph_ops *gops)
+                                     struct fgraph_ops *gops,
+                                     struct ftrace_regs *fregs)
 {
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
@@ -1039,7 +1041,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
                return 0;
        }
 
-       return trace_graph_entry(trace, gops);
+       return trace_graph_entry(trace, gops, fregs);
 }
 
 static struct fgraph_ops fgraph_ops __initdata  = {