]>
Commit | Line | Data |
---|---|---|
0aa95edc GKH |
1 | From rostedt@goodmis.org Mon Feb 10 16:45:02 2014 |
2 | From: Steven Rostedt <rostedt@goodmis.org> | |
3 | Date: Fri, 7 Feb 2014 14:41:17 -0500 | |
4 | Subject: ftrace: Synchronize setting function_trace_op with ftrace_trace_function | |
5 | To: Luis Henriques <luis.henriques@canonical.com> | |
6 | Cc: gregkh@linuxfoundation.org, stable@vger.kernel.org, stable-commits@vger.kernel.org | |
7 | Message-ID: <20140207144117.671fe030@gandalf.local.home> | |
8 | ||
9 | From: Steven Rostedt <rostedt@goodmis.org> | |
10 | ||
11 | commit 405e1d834807e51b2ebd3dea81cb51e53fb61504 upstream. | |
12 | ||
13 | ftrace_trace_function is a variable that holds what function will be called | |
14 | directly by the assembly code (mcount). If just a single function is | |
15 | registered and it handles recursion itself, then the assembly will call that | |
16 | function directly without any helper function. It also passes in the | |
17 | ftrace_op that was registered with the callback. The ftrace_op to send is | |
18 | stored in the function_trace_op variable. | |
19 | ||
20 | The ftrace_trace_function and function_trace_op needs to be coordinated such | |
21 | that the called callback won't be called with the wrong ftrace_op, otherwise | |
22 | bad things can happen if it expected a different op. Luckily, there's no | |
23 | callback that doesn't use the helper functions that requires this. But | |
24 | there soon will be and this needs to be fixed. | |
25 | ||
26 | Use a set_function_trace_op to store the ftrace_op to set the | |
27 | function_trace_op to when it is safe to do so (during the update function | |
28 | within the breakpoint or stop machine calls). Or if dynamic ftrace is not | |
29 | being used (static tracing) then we have to do a bit more synchronization | |
30 | when the ftrace_trace_function is set as that takes effect immediately | |
31 | (as opposed to dynamic ftrace doing it with the modification of the trampoline). | |
32 | ||
33 | Signed-off-by: Steven Rostedt <rostedt@goodmis.org> | |
34 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
35 | --- | |
36 | kernel/trace/ftrace.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++--- | |
37 | 1 file changed, 72 insertions(+), 4 deletions(-) | |
38 | ||
39 | --- a/kernel/trace/ftrace.c | |
40 | +++ b/kernel/trace/ftrace.c | |
41 | @@ -85,6 +85,8 @@ int function_trace_stop __read_mostly; | |
42 | ||
43 | /* Current function tracing op */ | |
44 | struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; | |
45 | +/* What to set function_trace_op to */ | |
46 | +static struct ftrace_ops *set_function_trace_op; | |
47 | ||
48 | /* List for set_ftrace_pid's pids. */ | |
49 | LIST_HEAD(ftrace_pids); | |
50 | @@ -278,6 +280,23 @@ static void update_global_ops(void) | |
51 | global_ops.func = func; | |
52 | } | |
53 | ||
54 | +static void ftrace_sync(struct work_struct *work) | |
55 | +{ | |
56 | + /* | |
57 | + * This function is just a stub to implement a hard force | |
58 | + * of synchronize_sched(). This requires synchronizing | |
59 | + * tasks even in userspace and idle. | |
60 | + * | |
61 | + * Yes, function tracing is rude. | |
62 | + */ | |
63 | +} | |
64 | + | |
65 | +static void ftrace_sync_ipi(void *data) | |
66 | +{ | |
67 | + /* Probably not needed, but do it anyway */ | |
68 | + smp_rmb(); | |
69 | +} | |
70 | + | |
71 | static void update_ftrace_function(void) | |
72 | { | |
73 | ftrace_func_t func; | |
74 | @@ -296,16 +315,59 @@ static void update_ftrace_function(void) | |
75 | !FTRACE_FORCE_LIST_FUNC)) { | |
76 | /* Set the ftrace_ops that the arch callback uses */ | |
77 | if (ftrace_ops_list == &global_ops) | |
78 | - function_trace_op = ftrace_global_list; | |
79 | + set_function_trace_op = ftrace_global_list; | |
80 | else | |
81 | - function_trace_op = ftrace_ops_list; | |
82 | + set_function_trace_op = ftrace_ops_list; | |
83 | func = ftrace_ops_list->func; | |
84 | } else { | |
85 | /* Just use the default ftrace_ops */ | |
86 | - function_trace_op = &ftrace_list_end; | |
87 | + set_function_trace_op = &ftrace_list_end; | |
88 | func = ftrace_ops_list_func; | |
89 | } | |
90 | ||
91 | + /* If there's no change, then do nothing more here */ | |
92 | + if (ftrace_trace_function == func) | |
93 | + return; | |
94 | + | |
95 | + /* | |
96 | + * If we are using the list function, it doesn't care | |
97 | + * about the function_trace_ops. | |
98 | + */ | |
99 | + if (func == ftrace_ops_list_func) { | |
100 | + ftrace_trace_function = func; | |
101 | + /* | |
102 | + * Don't even bother setting function_trace_ops, | |
103 | + * it would be racy to do so anyway. | |
104 | + */ | |
105 | + return; | |
106 | + } | |
107 | + | |
108 | +#ifndef CONFIG_DYNAMIC_FTRACE | |
109 | + /* | |
110 | + * For static tracing, we need to be a bit more careful. | |
111 | + * The function change takes affect immediately. Thus, | |
112 | + * we need to coorditate the setting of the function_trace_ops | |
113 | + * with the setting of the ftrace_trace_function. | |
114 | + * | |
115 | + * Set the function to the list ops, which will call the | |
116 | + * function we want, albeit indirectly, but it handles the | |
117 | + * ftrace_ops and doesn't depend on function_trace_op. | |
118 | + */ | |
119 | + ftrace_trace_function = ftrace_ops_list_func; | |
120 | + /* | |
121 | + * Make sure all CPUs see this. Yes this is slow, but static | |
122 | + * tracing is slow and nasty to have enabled. | |
123 | + */ | |
124 | + schedule_on_each_cpu(ftrace_sync); | |
125 | + /* Now all cpus are using the list ops. */ | |
126 | + function_trace_op = set_function_trace_op; | |
127 | + /* Make sure the function_trace_op is visible on all CPUs */ | |
128 | + smp_wmb(); | |
129 | + /* Nasty way to force a rmb on all cpus */ | |
130 | + smp_call_function(ftrace_sync_ipi, NULL, 1); | |
131 | + /* OK, we are all set to update the ftrace_trace_function now! */ | |
132 | +#endif /* !CONFIG_DYNAMIC_FTRACE */ | |
133 | + | |
134 | ftrace_trace_function = func; | |
135 | } | |
136 | ||
137 | @@ -1952,8 +2014,14 @@ void ftrace_modify_all_code(int command) | |
138 | else if (command & FTRACE_DISABLE_CALLS) | |
139 | ftrace_replace_code(0); | |
140 | ||
141 | - if (command & FTRACE_UPDATE_TRACE_FUNC) | |
142 | + if (command & FTRACE_UPDATE_TRACE_FUNC) { | |
143 | + function_trace_op = set_function_trace_op; | |
144 | + smp_wmb(); | |
145 | + /* If irqs are disabled, we are in stop machine */ | |
146 | + if (!irqs_disabled()) | |
147 | + smp_call_function(ftrace_sync_ipi, NULL, 1); | |
148 | ftrace_update_ftrace_func(ftrace_trace_function); | |
149 | + } | |
150 | ||
151 | if (command & FTRACE_START_FUNC_RET) | |
152 | ftrace_enable_ftrace_graph_caller(); |