--- /dev/null
+From edb096e00724f02db5f6ec7900f3bbd465c6c76f Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 1 Sep 2017 12:18:28 -0400
+Subject: ftrace: Fix memleak when unregistering dynamic ops when tracing disabled
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit edb096e00724f02db5f6ec7900f3bbd465c6c76f upstream.
+
+If function tracing is disabled by the user via the function-trace option or
+the proc sysctl file, and an ftrace_ops that was allocated on the heap is
+unregistered, then the shutdown code exits early without doing the proper
+cleanup. This was found via kmemleak while running the ftrace selftests, as
+one of the tests unregisters an ftrace_ops with function tracing disabled.
+
+ # cat kmemleak
+unreferenced object 0xffffffffa0020000 (size 4096):
+ comm "swapper/0", pid 1, jiffies 4294668889 (age 569.209s)
+ hex dump (first 32 bytes):
+ 55 ff 74 24 10 55 48 89 e5 ff 74 24 18 55 48 89 U.t$.UH...t$.UH.
+ e5 48 81 ec a8 00 00 00 48 89 44 24 50 48 89 4c .H......H.D$PH.L
+ backtrace:
+ [<ffffffff81d64665>] kmemleak_vmalloc+0x85/0xf0
+ [<ffffffff81355631>] __vmalloc_node_range+0x281/0x3e0
+ [<ffffffff8109697f>] module_alloc+0x4f/0x90
+ [<ffffffff81091170>] arch_ftrace_update_trampoline+0x160/0x420
+ [<ffffffff81249947>] ftrace_startup+0xe7/0x300
+ [<ffffffff81249bd2>] register_ftrace_function+0x72/0x90
+ [<ffffffff81263786>] trace_selftest_ops+0x204/0x397
+ [<ffffffff82bb8971>] trace_selftest_startup_function+0x394/0x624
+ [<ffffffff81263a75>] run_tracer_selftest+0x15c/0x1d7
+ [<ffffffff82bb83f1>] init_trace_selftests+0x75/0x192
+ [<ffffffff81002230>] do_one_initcall+0x90/0x1e2
+ [<ffffffff82b7d620>] kernel_init_freeable+0x350/0x3fe
+ [<ffffffff81d61ec3>] kernel_init+0x13/0x122
+ [<ffffffff81d72c6a>] ret_from_fork+0x2a/0x40
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
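+Below is a minimal, hypothetical sketch (module and symbol names are invented
+for illustration and are not part of this patch) of the pattern that leaked:
+a heap-allocated ftrace_ops, which ftrace flags FTRACE_OPS_FL_DYNAMIC and may
+give its own allocated trampoline, registered and then unregistered while
+function tracing is disabled:
+
+ #include <linux/ftrace.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+
+ /* The callback body is not relevant to the leak. */
+ static void demo_callback(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *op, struct pt_regs *regs)
+ {
+ }
+
+ static struct ftrace_ops *demo_ops;
+
+ static int __init demo_init(void)
+ {
+	int ret;
+
+	demo_ops = kzalloc(sizeof(*demo_ops), GFP_KERNEL);
+	if (!demo_ops)
+		return -ENOMEM;
+	demo_ops->func = demo_callback;
+
+	/*
+	 * Heap-allocated ops are flagged FTRACE_OPS_FL_DYNAMIC when
+	 * registered and may be given their own allocated trampoline.
+	 */
+	ret = register_ftrace_function(demo_ops);
+	if (ret) {
+		kfree(demo_ops);
+		return ret;
+	}
+
+	/*
+	 * If the user disables function tracing here, e.g. with
+	 *   echo 0 > /proc/sys/kernel/ftrace_enabled
+	 * then before this fix ftrace_shutdown() returned early and never
+	 * reached arch_ftrace_trampoline_free(), so the trampoline
+	 * allocated above was leaked.
+	 */
+	unregister_ftrace_function(demo_ops);
+
+	kfree(demo_ops);
+	return 0;
+ }
+
+ static void __exit demo_exit(void)
+ {
+ }
+
+ module_init(demo_init);
+ module_exit(demo_exit);
+ MODULE_LICENSE("GPL");
+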
+Fixes: 12cce594fa ("ftrace/x86: Allow !CONFIG_PREEMPT dynamic ops to use allocated trampolines")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2667,13 +2667,14 @@ static int ftrace_shutdown(struct ftrace
+
+ if (!command || !ftrace_enabled) {
+ /*
+- * If these are control ops, they still need their
+- * per_cpu field freed. Since, function tracing is
++ * If these are dynamic or control ops, they still
++ * need their data freed. Since, function tracing is
+ * not currently active, we can just free them
+ * without synchronizing all CPUs.
+ */
+- if (ops->flags & FTRACE_OPS_FL_CONTROL)
+- control_ops_free(ops);
++ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
++ goto free_ops;
++
+ return 0;
+ }
+
+@@ -2728,6 +2729,7 @@ static int ftrace_shutdown(struct ftrace
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+ schedule_on_each_cpu(ftrace_sync);
+
++ free_ops:
+ arch_ftrace_trampoline_free(ops);
+
+ if (ops->flags & FTRACE_OPS_FL_CONTROL)