3.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 11 Feb 2014 01:20:42 +0000 (17:20 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 11 Feb 2014 01:20:42 +0000 (17:20 -0800)
added patches:
ftrace-fix-synchronization-location-disabling-and-freeing-ftrace_ops.patch
ftrace-synchronize-setting-function_trace_op-with-ftrace_trace_function.patch

queue-3.10/ftrace-fix-synchronization-location-disabling-and-freeing-ftrace_ops.patch [new file with mode: 0644]
queue-3.10/ftrace-have-function-graph-only-trace-based-on-global_ops-filters.patch
queue-3.10/ftrace-synchronize-setting-function_trace_op-with-ftrace_trace_function.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/ftrace-fix-synchronization-location-disabling-and-freeing-ftrace_ops.patch b/queue-3.10/ftrace-fix-synchronization-location-disabling-and-freeing-ftrace_ops.patch
new file mode 100644 (file)
index 0000000..ba0ca24
--- /dev/null
+++ b/queue-3.10/ftrace-fix-synchronization-location-disabling-and-freeing-ftrace_ops.patch
@@ -0,0 +1,106 @@
+From rostedt@goodmis.org  Mon Feb 10 16:45:29 2014
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Fri, 7 Feb 2014 14:42:01 -0500
+Subject: ftrace: Fix synchronization location disabling and freeing ftrace_ops
+To: Luis Henriques <luis.henriques@canonical.com>
+Cc: gregkh@linuxfoundation.org, stable@vger.kernel.org, stable-commits@vger.kernel.org
+Message-ID: <20140207144201.38d64ed8@gandalf.local.home>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit a4c35ed241129dd142be4cadb1e5a474a56d5464 upstream.
+
+The synchronization needed after ftrace_ops are unregistered must happen
+after the callback is disabled from being called by functions.
+
+The synchronization currently happens after the ops is removed from the
+internal lists, but not after the function callbacks are disabled, leaving
+the functions susceptible to being called after their callbacks are freed.
+
+This affects perf and any external users of function tracing (LTTng and
+SystemTap).
+
+Fixes: cdbe61bfe704 "ftrace: Allow dynamically allocated function tracers"
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c |   50 ++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 32 insertions(+), 18 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -490,16 +490,6 @@ static int __unregister_ftrace_function(
+       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+               ret = remove_ftrace_list_ops(&ftrace_control_list,
+                                            &control_ops, ops);
+-              if (!ret) {
+-                      /*
+-                       * The ftrace_ops is now removed from the list,
+-                       * so there'll be no new users. We must ensure
+-                       * all current users are done before we free
+-                       * the control data.
+-                       */
+-                      synchronize_sched();
+-                      control_ops_free(ops);
+-              }
+       } else
+               ret = remove_ftrace_ops(&ftrace_ops_list, ops);
+@@ -509,13 +499,6 @@ static int __unregister_ftrace_function(
+       if (ftrace_enabled)
+               update_ftrace_function();
+-      /*
+-       * Dynamic ops may be freed, we must make sure that all
+-       * callers are done before leaving this function.
+-       */
+-      if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+-              synchronize_sched();
+-
+       return 0;
+ }
+@@ -2184,10 +2167,41 @@ static int ftrace_shutdown(struct ftrace
+               command |= FTRACE_UPDATE_TRACE_FUNC;
+       }
+-      if (!command || !ftrace_enabled)
++      if (!command || !ftrace_enabled) {
++              /*
++               * If these are control ops, they still need their
++               * per_cpu field freed. Since, function tracing is
++               * not currently active, we can just free them
++               * without synchronizing all CPUs.
++               */
++              if (ops->flags & FTRACE_OPS_FL_CONTROL)
++                      control_ops_free(ops);
+               return 0;
++      }
+       ftrace_run_update_code(command);
++
++      /*
++       * Dynamic ops may be freed, we must make sure that all
++       * callers are done before leaving this function.
++       * The same goes for freeing the per_cpu data of the control
++       * ops.
++       *
++       * Again, normal synchronize_sched() is not good enough.
++       * We need to do a hard force of sched synchronization.
++       * This is because we use preempt_disable() to do RCU, but
++       * the function tracers can be called where RCU is not watching
++       * (like before user_exit()). We can not rely on the RCU
++       * infrastructure to do the synchronization, thus we must do it
++       * ourselves.
++       */
++      if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
++              schedule_on_each_cpu(ftrace_sync);
++
++              if (ops->flags & FTRACE_OPS_FL_CONTROL)
++                      control_ops_free(ops);
++      }
++
+       return 0;
+ }
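Editor's note: the patch above moves the synchronize/free step in ftrace_shutdown() so the ops is freed only after its callback can no longer be invoked and all in-flight callers have drained. What follows is a minimal, compilable userspace sketch of that ordering only; it is not kernel code, and names such as struct ops, run_update_code() and sync_all_cpus() are stand-ins for the kernel primitives.

#include <stdio.h>
#include <stdlib.h>

#define FL_DYNAMIC  0x1
#define FL_CONTROL  0x2

struct ops { unsigned flags; void *per_cpu; };

static void run_update_code(void)   { puts("callbacks patched out"); }
static void sync_all_cpus(void)     { puts("schedule_on_each_cpu(ftrace_sync)"); }
static void control_ops_free(struct ops *ops) { free(ops->per_cpu); ops->per_cpu = NULL; }

static int shutdown(struct ops *ops, int command, int ftrace_enabled)
{
	if (!command || !ftrace_enabled) {
		/* Tracing is not live, so nothing can call the ops: free at once. */
		if (ops->flags & FL_CONTROL)
			control_ops_free(ops);
		return 0;
	}

	run_update_code();                          /* 1) stop the callback from being called */

	if (ops->flags & (FL_DYNAMIC | FL_CONTROL)) {
		sync_all_cpus();                    /* 2) wait for every in-flight caller */
		if (ops->flags & FL_CONTROL)
			control_ops_free(ops);      /* 3) only now is freeing safe */
	}
	return 0;
}

int main(void)
{
	struct ops ops = { .flags = FL_CONTROL, .per_cpu = malloc(64) };
	return shutdown(&ops, 1, 1);
}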
diff --git a/queue-3.10/ftrace-have-function-graph-only-trace-based-on-global_ops-filters.patch b/queue-3.10/ftrace-have-function-graph-only-trace-based-on-global_ops-filters.patch
index 7826b4642ad1b1cb605db7a5c617c47a300f3d77..a7eae43e6ecaf73063fc95fd0a1e0b50dbefc03a 100644 (file)
@@ -1,9 +1,12 @@
-From 23a8e8441a0a74dd612edf81dc89d1600bc0a3d1 Mon Sep 17 00:00:00 2001
-From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
-Date: Mon, 13 Jan 2014 10:30:23 -0500
-Subject: ftrace: Have function graph only trace based on global_ops filters
+From rostedt@goodmis.org  Mon Feb 10 16:48:15 2014
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Fri, 7 Feb 2014 14:42:35 -0500
+Subject: ftrace: Have function graph only trace based on global_ops filters
+To: Luis Henriques <luis.henriques@canonical.com>
+Cc: gregkh@linuxfoundation.org, stable@vger.kernel.org, stable-commits@vger.kernel.org
+Message-ID: <20140207144235.36f44ec8@gandalf.local.home>
 
-From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+From: Steven Rostedt <rostedt@goodmis.org>
 
 commit 23a8e8441a0a74dd612edf81dc89d1600bc0a3d1 upstream.
 
@@ -88,15 +91,14 @@ and not go through the test trampoline.
 Fixes: d2d45c7a03a2 "tracing: Have stack_tracer use a separate list of functions"
 Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
 ---
  kernel/trace/ftrace.c |   45 ++++++++++++++++++++++++++++++++++++++++++++-
  1 file changed, 44 insertions(+), 1 deletion(-)
 
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
-@@ -278,6 +278,12 @@ static void update_global_ops(void)
-       global_ops.func = func;
+@@ -297,6 +297,12 @@ static void ftrace_sync_ipi(void *data)
+       smp_rmb();
  }
  
 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -108,16 +110,16 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  static void update_ftrace_function(void)
  {
        ftrace_func_t func;
-@@ -325,6 +331,8 @@ static int remove_ftrace_ops(struct ftra
- {
-       struct ftrace_ops **p;
+@@ -329,6 +335,8 @@ static void update_ftrace_function(void)
+       if (ftrace_trace_function == func)
+               return;
  
 +      update_function_graph_func();
 +
        /*
-        * If we are removing the last function, then simply point
-        * to the ftrace_stub.
-@@ -4728,6 +4736,7 @@ int ftrace_graph_entry_stub(struct ftrac
+        * If we are using the list function, it doesn't care
+        * about the function_trace_ops.
+@@ -4810,6 +4818,7 @@ int ftrace_graph_entry_stub(struct ftrac
  trace_func_graph_ret_t ftrace_graph_return =
                        (trace_func_graph_ret_t)ftrace_stub;
  trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
@@ -125,7 +127,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
  /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
  static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-@@ -4869,6 +4878,30 @@ static struct ftrace_ops fgraph_ops __re
+@@ -4951,6 +4960,30 @@ static struct ftrace_ops fgraph_ops __re
                                FTRACE_OPS_FL_RECURSION_SAFE,
  };
  
@@ -156,7 +158,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                        trace_func_graph_ent_t entryfunc)
  {
-@@ -4893,7 +4926,16 @@ int register_ftrace_graph(trace_func_gra
+@@ -4975,7 +5008,16 @@ int register_ftrace_graph(trace_func_gra
        }
  
        ftrace_graph_return = retfunc;
@@ -174,7 +176,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  
        ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
  
-@@ -4912,6 +4954,7 @@ void unregister_ftrace_graph(void)
+@@ -4994,6 +5036,7 @@ void unregister_ftrace_graph(void)
        ftrace_graph_active--;
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
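Editor's note: the patch above routes function-graph entries through a test wrapper whenever any ftrace_ops other than global_ops is registered, so the graph tracer keeps honouring the global_ops filters instead of tracing every function. Below is a minimal, compilable userspace sketch of that indirection; it is not the kernel implementation, and global_ops_would_trace() plus its filter predicate are invented stand-ins for ftrace_ops_test().

#include <stdio.h>
#include <stdbool.h>

struct ftrace_graph_ent { unsigned long func; };
typedef int (*graph_entry_fn)(struct ftrace_graph_ent *);

/* stand-in for ftrace_ops_test(&global_ops, func, NULL) */
static bool global_ops_would_trace(unsigned long func)
{
	return (func & 0xf) == 0;           /* pretend only aligned entries pass the filter */
}

static int graph_entry_real(struct ftrace_graph_ent *ent)
{
	printf("graph-traced %#lx\n", ent->func);
	return 1;
}

/* saved "real" callback, invoked only when the global_ops filter matches */
static graph_entry_fn __graph_entry = graph_entry_real;

static int graph_entry_test(struct ftrace_graph_ent *ent)
{
	if (!global_ops_would_trace(ent->func))
		return 0;                   /* filtered out: skip the graph trace */
	return __graph_entry(ent);
}

/* what the (modelled) trampoline calls */
static graph_entry_fn graph_entry = graph_entry_real;

static void update_function_graph_func(bool other_ops_registered)
{
	/* With other ftrace_ops registered, any function may be traced, so
	 * route graph entries through the wrapper that re-checks global_ops. */
	graph_entry = other_ops_registered ? graph_entry_test : graph_entry_real;
}

int main(void)
{
	struct ftrace_graph_ent a = { 0x1000 }, b = { 0x1004 };

	update_function_graph_func(true);
	graph_entry(&a);                    /* matches the filter: traced */
	graph_entry(&b);                    /* does not match: skipped */
	return 0;
}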
diff --git a/queue-3.10/ftrace-synchronize-setting-function_trace_op-with-ftrace_trace_function.patch b/queue-3.10/ftrace-synchronize-setting-function_trace_op-with-ftrace_trace_function.patch
new file mode 100644 (file)
index 0000000..9f5f4a1
--- /dev/null
+++ b/queue-3.10/ftrace-synchronize-setting-function_trace_op-with-ftrace_trace_function.patch
@@ -0,0 +1,152 @@
+From rostedt@goodmis.org  Mon Feb 10 16:45:02 2014
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Fri, 7 Feb 2014 14:41:17 -0500
+Subject: ftrace: Synchronize setting function_trace_op with ftrace_trace_function
+To: Luis Henriques <luis.henriques@canonical.com>
+Cc: gregkh@linuxfoundation.org, stable@vger.kernel.org, stable-commits@vger.kernel.org
+Message-ID: <20140207144117.671fe030@gandalf.local.home>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit 405e1d834807e51b2ebd3dea81cb51e53fb61504 upstream.
+
+ftrace_trace_function is a variable that holds what function will be called
+directly by the assembly code (mcount). If just a single function is
+registered and it handles recursion itself, then the assembly will call that
+function directly without any helper function. It also passes in the
+ftrace_op that was registered with the callback. The ftrace_op to send is
+stored in the function_trace_op variable.
+
+The ftrace_trace_function and function_trace_op need to be coordinated such
+that the called callback won't be called with the wrong ftrace_op, otherwise
+bad things can happen if it expected a different op. Luckily, there's no
+callback that doesn't use the helper functions that requires this. But
+there soon will be and this needs to be fixed.
+
+Use a set_function_trace_op to store the ftrace_op to set the
+function_trace_op to when it is safe to do so (during the update function
+within the breakpoint or stop machine calls). Or if dynamic ftrace is not
+being used (static tracing) then we have to do a bit more synchronization
+when the ftrace_trace_function is set as that takes effect immediately
+(as opposed to dynamic ftrace doing it with the modification of the trampoline).
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ftrace.c |   76 +++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 72 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
+ /* Current function tracing op */
+ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
++/* What to set function_trace_op to */
++static struct ftrace_ops *set_function_trace_op;
+ /* List for set_ftrace_pid's pids. */
+ LIST_HEAD(ftrace_pids);
+@@ -278,6 +280,23 @@ static void update_global_ops(void)
+       global_ops.func = func;
+ }
++static void ftrace_sync(struct work_struct *work)
++{
++      /*
++       * This function is just a stub to implement a hard force
++       * of synchronize_sched(). This requires synchronizing
++       * tasks even in userspace and idle.
++       *
++       * Yes, function tracing is rude.
++       */
++}
++
++static void ftrace_sync_ipi(void *data)
++{
++      /* Probably not needed, but do it anyway */
++      smp_rmb();
++}
++
+ static void update_ftrace_function(void)
+ {
+       ftrace_func_t func;
+@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
+            !FTRACE_FORCE_LIST_FUNC)) {
+               /* Set the ftrace_ops that the arch callback uses */
+               if (ftrace_ops_list == &global_ops)
+-                      function_trace_op = ftrace_global_list;
++                      set_function_trace_op = ftrace_global_list;
+               else
+-                      function_trace_op = ftrace_ops_list;
++                      set_function_trace_op = ftrace_ops_list;
+               func = ftrace_ops_list->func;
+       } else {
+               /* Just use the default ftrace_ops */
+-              function_trace_op = &ftrace_list_end;
++              set_function_trace_op = &ftrace_list_end;
+               func = ftrace_ops_list_func;
+       }
++      /* If there's no change, then do nothing more here */
++      if (ftrace_trace_function == func)
++              return;
++
++      /*
++       * If we are using the list function, it doesn't care
++       * about the function_trace_ops.
++       */
++      if (func == ftrace_ops_list_func) {
++              ftrace_trace_function = func;
++              /*
++               * Don't even bother setting function_trace_ops,
++               * it would be racy to do so anyway.
++               */
++              return;
++      }
++
++#ifndef CONFIG_DYNAMIC_FTRACE
++      /*
++       * For static tracing, we need to be a bit more careful.
++       * The function change takes effect immediately. Thus,
++       * we need to coordinate the setting of the function_trace_ops
++       * with the setting of the ftrace_trace_function.
++       *
++       * Set the function to the list ops, which will call the
++       * function we want, albeit indirectly, but it handles the
++       * ftrace_ops and doesn't depend on function_trace_op.
++       */
++      ftrace_trace_function = ftrace_ops_list_func;
++      /*
++       * Make sure all CPUs see this. Yes this is slow, but static
++       * tracing is slow and nasty to have enabled.
++       */
++      schedule_on_each_cpu(ftrace_sync);
++      /* Now all cpus are using the list ops. */
++      function_trace_op = set_function_trace_op;
++      /* Make sure the function_trace_op is visible on all CPUs */
++      smp_wmb();
++      /* Nasty way to force a rmb on all cpus */
++      smp_call_function(ftrace_sync_ipi, NULL, 1);
++      /* OK, we are all set to update the ftrace_trace_function now! */
++#endif /* !CONFIG_DYNAMIC_FTRACE */
++
+       ftrace_trace_function = func;
+ }
+@@ -1952,8 +2014,14 @@ void ftrace_modify_all_code(int command)
+       else if (command & FTRACE_DISABLE_CALLS)
+               ftrace_replace_code(0);
+-      if (command & FTRACE_UPDATE_TRACE_FUNC)
++      if (command & FTRACE_UPDATE_TRACE_FUNC) {
++              function_trace_op = set_function_trace_op;
++              smp_wmb();
++              /* If irqs are disabled, we are in stop machine */
++              if (!irqs_disabled())
++                      smp_call_function(ftrace_sync_ipi, NULL, 1);
+               ftrace_update_ftrace_func(ftrace_trace_function);
++      }
+       if (command & FTRACE_START_FUNC_RET)
+               ftrace_enable_ftrace_graph_caller();
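Editor's note: when dynamic ftrace is unavailable, the patch above switches callbacks in a fixed order: park every CPU on the list function, publish set_function_trace_op, force all CPUs to observe it, and only then install the final callback. The minimal, compilable userspace model below mirrors just that ordering; it is not kernel code, sync_every_cpu() and ipi_every_cpu() are stand-ins for schedule_on_each_cpu()/smp_call_function(), and the memory barriers are elided. In the real ftrace_modify_all_code() path the IPI is skipped when interrupts are disabled, since that indicates stop_machine is already providing the synchronization.

#include <stdio.h>

typedef void (*ftrace_func_t)(void);

static void list_func(void)  { puts("list func: finds its ops on its own"); }
static void final_func(void) { puts("final func: relies on function_trace_op"); }

static const char *function_trace_op = "old op";
static ftrace_func_t ftrace_trace_function = list_func;

static void sync_every_cpu(void) { puts("schedule_on_each_cpu(ftrace_sync)"); }
static void ipi_every_cpu(void)  { puts("smp_call_function(ftrace_sync_ipi)"); }

static void update_ftrace_function(const char *new_op, ftrace_func_t func)
{
	/* 1) Park all CPUs on the list function, which ignores function_trace_op. */
	ftrace_trace_function = list_func;
	sync_every_cpu();

	/* 2) Publish the new op (a write barrier sits here in the kernel). */
	function_trace_op = new_op;

	/* 3) Force every CPU to observe the new op before the final callback runs. */
	ipi_every_cpu();

	/* 4) Only now is it safe to install the callback that consumes the op. */
	ftrace_trace_function = func;
}

int main(void)
{
	update_ftrace_function("new op", final_func);
	ftrace_trace_function();
	return 0;
}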
diff --git a/queue-3.10/series b/queue-3.10/series
index 7db15cc6ceb915ea07c97d09113c263ce349c87b..c11c97486f3f365de66b6ed39b600e28bbbf9d2b 100644 (file)
@@ -1,5 +1,4 @@
 selinux-fix-memory-leak-upon-loading-policy.patch
-ftrace-have-function-graph-only-trace-based-on-global_ops-filters.patch
 tracing-have-trace-buffer-point-back-to-trace_array.patch
 tracing-check-if-tracing-is-enabled-in-trace_puts.patch
 arch-sh-kernel-kgdb.c-add-missing-include-linux-sched.h.patch
@@ -66,3 +65,6 @@ drm-mgag200-fix-typo-causing-bw-limits-to-be-ignored-on-some-chips.patch
 mfd-lpc_ich-add-support-for-intel-avoton-soc.patch
 mfd-lpc_ich-itco_wdt-patch-for-intel-coleto-creek-deviceids.patch
 i2c-i801-smbus-patch-for-intel-coleto-creek-deviceids.patch
+ftrace-synchronize-setting-function_trace_op-with-ftrace_trace_function.patch
+ftrace-fix-synchronization-location-disabling-and-freeing-ftrace_ops.patch
+ftrace-have-function-graph-only-trace-based-on-global_ops-filters.patch