git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
function_graph: Add pid tracing back to function graph tracer
author: Steven Rostedt (Google) <rostedt@goodmis.org>
Mon, 3 Jun 2024 19:07:17 +0000 (15:07 -0400)
committer: Steven Rostedt (Google) <rostedt@goodmis.org>
Tue, 4 Jun 2024 14:37:11 +0000 (10:37 -0400)
Now that the function_graph has a main callback that handles the function
graph subops tracing, it no longer honors the pid filtering of ftrace. Add
back this logic in the function_graph code to update the gops callback for
the entry function to test if it should trace the current task or not.

Link: https://lore.kernel.org/linux-trace-kernel/20240603190822.991720703@goodmis.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Guo Ren <guoren@kernel.org>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
include/linux/ftrace.h
kernel/trace/fgraph.c
kernel/trace/ftrace.c
kernel/trace/ftrace_internal.h

index 8f865689e868ac0acec3ff89d185fccd4b99a2a3..e31ec8516de188193ceb9c5a8351d608c8fd0b8b 100644 (file)
@@ -1040,6 +1040,7 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
                                      struct fgraph_ops *); /* entry */
 
 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
+bool ftrace_pids_enabled(struct ftrace_ops *ops);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
@@ -1048,6 +1049,7 @@ struct fgraph_ops {
        trace_func_graph_ret_t          retfunc;
        struct ftrace_ops               ops; /* for the hash lists */
        void                            *private;
+       trace_func_graph_ent_t          saved_func;
        int                             idx;
 };
 
index 3ef6db53c0bf864469fd76827c71b79a5148a015..30bed20c655f1af4d841eb3afe0a746d1e4d9740 100644 (file)
@@ -854,6 +854,41 @@ void ftrace_graph_exit_task(struct task_struct *t)
        kfree(ret_stack);
 }
 
+static int fgraph_pid_func(struct ftrace_graph_ent *trace,
+                          struct fgraph_ops *gops)
+{
+       struct trace_array *tr = gops->ops.private;
+       int pid;
+
+       if (tr) {
+               pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+               if (pid == FTRACE_PID_IGNORE)
+                       return 0;
+               if (pid != FTRACE_PID_TRACE &&
+                   pid != current->pid)
+                       return 0;
+       }
+
+       return gops->saved_func(trace, gops);
+}
+
+void fgraph_update_pid_func(void)
+{
+       struct fgraph_ops *gops;
+       struct ftrace_ops *op;
+
+       if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
+               return;
+
+       list_for_each_entry(op, &graph_ops.subop_list, list) {
+               if (op->flags & FTRACE_OPS_FL_PID) {
+                       gops = container_of(op, struct fgraph_ops, ops);
+                       gops->entryfunc = ftrace_pids_enabled(op) ?
+                               fgraph_pid_func : gops->saved_func;
+               }
+       }
+}
+
 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
 {
@@ -931,11 +966,15 @@ int register_ftrace_graph(struct fgraph_ops *gops)
                command = FTRACE_START_FUNC_RET;
        }
 
+       /* Always save the function, and reset at unregistering */
+       gops->saved_func = gops->entryfunc;
+
        ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
 error:
        if (ret) {
                fgraph_array[i] = &fgraph_stub;
                ftrace_graph_active--;
+               gops->saved_func = NULL;
        }
 out:
        mutex_unlock(&ftrace_lock);
@@ -979,5 +1018,6 @@ void unregister_ftrace_graph(struct fgraph_ops *gops)
                unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
        }
  out:
+       gops->saved_func = NULL;
        mutex_unlock(&ftrace_lock);
 }
index 58e0f4bc02419a9c7df78691c45d6602aadafb3c..da7e6abf48b4b1c847526d9b95f30fa22315e90c 100644 (file)
@@ -100,7 +100,7 @@ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 /* What to set function_trace_op to */
 static struct ftrace_ops *set_function_trace_op;
 
-static bool ftrace_pids_enabled(struct ftrace_ops *ops)
+bool ftrace_pids_enabled(struct ftrace_ops *ops)
 {
        struct trace_array *tr;
 
@@ -402,10 +402,11 @@ static void ftrace_update_pid_func(void)
                if (op->flags & FTRACE_OPS_FL_PID) {
                        op->func = ftrace_pids_enabled(op) ?
                                ftrace_pid_func : op->saved_func;
-                       ftrace_update_trampoline(op);
                }
        } while_for_each_ftrace_op(op);
 
+       fgraph_update_pid_func();
+
        update_ftrace_function();
 }
 
index cdfd12c44ab47cdb939af01a692a1ae3b03c4a10..bfba10c2fcf19b3d0554814846342814912c043c 100644 (file)
@@ -43,8 +43,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern int ftrace_graph_active;
+extern void fgraph_update_pid_func(void);
 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
 # define ftrace_graph_active 0
+static inline void fgraph_update_pid_func(void) {}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #else /* !CONFIG_FUNCTION_TRACER */