--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
-@@ -278,6 +278,12 @@ static void update_global_ops(void)
- global_ops.func = func;
+@@ -297,6 +297,12 @@ static void ftrace_sync_ipi(void *data)
+ smp_rmb();
}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_ftrace_function(void)
{
ftrace_func_t func;
-@@ -325,6 +331,8 @@ static int remove_ftrace_ops(struct ftra
- {
- struct ftrace_ops **p;
+@@ -329,6 +335,8 @@ static void update_ftrace_function(void)
+ if (ftrace_trace_function == func)
+ return;
+ update_function_graph_func();
+
/*
- * If we are removing the last function, then simply point
- * to the ftrace_stub.
-@@ -4862,6 +4870,7 @@ int ftrace_graph_entry_stub(struct ftrac
+ * If we are using the list function, it doesn't care
+ * about the function_trace_ops.
+@@ -4925,6 +4933,7 @@ int ftrace_graph_entry_stub(struct ftrac
trace_func_graph_ret_t ftrace_graph_return =
(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-@@ -5003,6 +5012,30 @@ static struct ftrace_ops fgraph_ops __re
+@@ -5066,6 +5075,30 @@ static struct ftrace_ops fgraph_ops __re
FTRACE_OPS_FL_RECURSION_SAFE,
};
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
-@@ -5027,7 +5060,16 @@ int register_ftrace_graph(trace_func_gra
+@@ -5090,7 +5123,16 @@ int register_ftrace_graph(trace_func_gra
}
ftrace_graph_return = retfunc;
ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
-@@ -5046,6 +5088,7 @@ void unregister_ftrace_graph(void)
+@@ -5109,6 +5151,7 @@ void unregister_ftrace_graph(void)
ftrace_graph_active--;
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
--- /dev/null
+From 405e1d834807e51b2ebd3dea81cb51e53fb61504 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Fri, 8 Nov 2013 14:17:30 -0500
+Subject: ftrace: Synchronize setting function_trace_op with ftrace_trace_function
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 405e1d834807e51b2ebd3dea81cb51e53fb61504 upstream.
+
+ftrace_trace_function is a variable that holds the function that will be
+called directly by the assembly code (mcount). If just a single function is
+registered and it handles recursion itself, then the assembly will call that
+function directly without any helper function. It also passes in the
+ftrace_op that was registered with the callback. The ftrace_op to send is
+stored in the function_trace_op variable.
+
+The ftrace_trace_function and function_trace_op need to be coordinated so
+that the callback won't be called with the wrong ftrace_op, otherwise bad
+things can happen if it expected a different op. Luckily, there is currently
+no callback that bypasses the helper functions and therefore requires this.
+But there soon will be, so this needs to be fixed.
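+
+As a rough illustration of the race (this is not the real arch trampoline;
+the C function below is a made-up stand-in for the mcount assembly, and the
+regs argument is simply passed as NULL):
+
+	static void mcount_sketch(unsigned long ip, unsigned long parent_ip)
+	{
+		/* Two independent loads of global tracing state: */
+		ftrace_func_t func = ftrace_trace_function;
+		struct ftrace_ops *op = function_trace_op;
+
+		/*
+		 * If another CPU updates ftrace_trace_function and
+		 * function_trace_op between these two loads, func is handed
+		 * an op that was registered for a different callback.
+		 */
+		func(ip, parent_ip, op, NULL);
+	}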
+
+Use a new set_function_trace_op variable to store the ftrace_op that
+function_trace_op should be set to when it is safe to do so (during the
+update function within the breakpoint or stop machine calls). If dynamic
+ftrace is not being used (static tracing), then we have to do a bit more
+synchronization when ftrace_trace_function is set, as that takes effect
+immediately (as opposed to dynamic ftrace doing it with the modification
+of the trampoline).
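+
+For the static (!CONFIG_DYNAMIC_FTRACE) case, the extra synchronization
+boils down to the three steps below (a simplified sketch of the code added
+in this patch; the real update_ftrace_function() also returns early when
+nothing actually changes):
+
+	/* 1. Route every CPU through the list func, which ignores the op. */
+	ftrace_trace_function = ftrace_ops_list_func;
+	schedule_on_each_cpu(ftrace_sync);	/* wait for all CPUs to switch */
+
+	/* 2. Publish the new op, then force a barrier on every CPU. */
+	function_trace_op = set_function_trace_op;
+	smp_wmb();
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
+
+	/* 3. Only now is it safe to point mcount at the real callback. */
+	ftrace_trace_function = func;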
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 87 +++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 72 insertions(+), 15 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
+
+ /* Current function tracing op */
+ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
++/* What to set function_trace_op to */
++static struct ftrace_ops *set_function_trace_op;
+
+ /* List for set_ftrace_pid's pids. */
+ LIST_HEAD(ftrace_pids);
+@@ -278,6 +280,23 @@ static void update_global_ops(void)
+ global_ops.func = func;
+ }
+
++static void ftrace_sync(struct work_struct *work)
++{
++ /*
++ * This function is just a stub to implement a hard force
++ * of synchronize_sched(). This requires synchronizing
++ * tasks even in userspace and idle.
++ *
++ * Yes, function tracing is rude.
++ */
++}
++
++static void ftrace_sync_ipi(void *data)
++{
++ /* Probably not needed, but do it anyway */
++ smp_rmb();
++}
++
+ static void update_ftrace_function(void)
+ {
+ ftrace_func_t func;
+@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
+ !FTRACE_FORCE_LIST_FUNC)) {
+ /* Set the ftrace_ops that the arch callback uses */
+ if (ftrace_ops_list == &global_ops)
+- function_trace_op = ftrace_global_list;
++ set_function_trace_op = ftrace_global_list;
+ else
+- function_trace_op = ftrace_ops_list;
++ set_function_trace_op = ftrace_ops_list;
+ func = ftrace_ops_list->func;
+ } else {
+ /* Just use the default ftrace_ops */
+- function_trace_op = &ftrace_list_end;
++ set_function_trace_op = &ftrace_list_end;
+ func = ftrace_ops_list_func;
+ }
+
++ /* If there's no change, then do nothing more here */
++ if (ftrace_trace_function == func)
++ return;
++
++ /*
++ * If we are using the list function, it doesn't care
++ * about the function_trace_ops.
++ */
++ if (func == ftrace_ops_list_func) {
++ ftrace_trace_function = func;
++ /*
++ * Don't even bother setting function_trace_ops,
++ * it would be racy to do so anyway.
++ */
++ return;
++ }
++
++#ifndef CONFIG_DYNAMIC_FTRACE
++ /*
++ * For static tracing, we need to be a bit more careful.
++	 * The function change takes effect immediately. Thus,
++	 * we need to coordinate the setting of the function_trace_ops
++ * with the setting of the ftrace_trace_function.
++ *
++ * Set the function to the list ops, which will call the
++ * function we want, albeit indirectly, but it handles the
++ * ftrace_ops and doesn't depend on function_trace_op.
++ */
++ ftrace_trace_function = ftrace_ops_list_func;
++ /*
++ * Make sure all CPUs see this. Yes this is slow, but static
++ * tracing is slow and nasty to have enabled.
++ */
++ schedule_on_each_cpu(ftrace_sync);
++ /* Now all cpus are using the list ops. */
++ function_trace_op = set_function_trace_op;
++ /* Make sure the function_trace_op is visible on all CPUs */
++ smp_wmb();
++ /* Nasty way to force a rmb on all cpus */
++ smp_call_function(ftrace_sync_ipi, NULL, 1);
++ /* OK, we are all set to update the ftrace_trace_function now! */
++#endif /* !CONFIG_DYNAMIC_FTRACE */
++
+ ftrace_trace_function = func;
+ }
+
+@@ -410,17 +472,6 @@ static int __register_ftrace_function(st
+ return 0;
+ }
+
+-static void ftrace_sync(struct work_struct *work)
+-{
+- /*
+- * This function is just a stub to implement a hard force
+- * of synchronize_sched(). This requires synchronizing
+- * tasks even in userspace and idle.
+- *
+- * Yes, function tracing is rude.
+- */
+-}
+-
+ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ {
+ int ret;
+@@ -1992,8 +2043,14 @@ void ftrace_modify_all_code(int command)
+ else if (command & FTRACE_DISABLE_CALLS)
+ ftrace_replace_code(0);
+
+- if (update && ftrace_trace_function != ftrace_ops_list_func)
++ if (update && ftrace_trace_function != ftrace_ops_list_func) {
++ function_trace_op = set_function_trace_op;
++ smp_wmb();
++ /* If irqs are disabled, we are in stop machine */
++ if (!irqs_disabled())
++ smp_call_function(ftrace_sync_ipi, NULL, 1);
+ ftrace_update_ftrace_func(ftrace_trace_function);
++ }
+
+ if (command & FTRACE_START_FUNC_RET)
+ ftrace_enable_ftrace_graph_caller();