--- /dev/null
+From aaf6ac0f0871cb7fc0f28f3a00edf329bc7adc29 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung.kim@lge.com>
+Date: Fri, 7 Jun 2013 15:07:48 +0900
+Subject: tracing: Do not call kmem_cache_free() on allocation failure
+
+From: Namhyung Kim <namhyung.kim@lge.com>
+
+commit aaf6ac0f0871cb7fc0f28f3a00edf329bc7adc29 upstream.
+
+There's no point calling it when _alloc() failed.
+
+Link: http://lkml.kernel.org/r/1370585268-29169-1-git-send-email-namhyung@kernel.org
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -114,7 +114,7 @@ static int __trace_define_field(struct l
+
+ field = kmem_cache_alloc(field_cachep, GFP_TRACE);
+ if (!field)
+- goto err;
++ return -ENOMEM;
+
+ field->name = name;
+ field->type = type;
+@@ -131,11 +131,6 @@ static int __trace_define_field(struct l
+ list_add(&field->link, head);
+
+ return 0;
+-
+-err:
+- kmem_cache_free(field_cachep, field);
+-
+- return -ENOMEM;
+ }
+
+ int trace_define_field(struct ftrace_event_call *call, const char *type,
--- /dev/null
+From a232e270dcb55a70ad3241bc6fc160fd9b5c9e6c Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+Date: Tue, 9 Jul 2013 18:35:26 +0900
+Subject: tracing/kprobe: Wait for disabling all running kprobe handlers
+
+From: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+
+commit a232e270dcb55a70ad3241bc6fc160fd9b5c9e6c upstream.
+
+Wait until all running kprobe handlers have finished when a kprobe
+event is disabled, since the caller, trace_remove_event_call(),
+assumes that the event being removed is completely disabled once
+the event has been disabled.
+With this change, ftrace can ensure that there are no running
+event handlers after an event is disabled.
+
+Link: http://lkml.kernel.org/r/20130709093526.20138.93100.stgit@mhiramat-M0-7522
+
+Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_kprobe.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -281,6 +281,8 @@ trace_probe_file_index(struct trace_prob
+ static int
+ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
+ {
++ struct ftrace_event_file **old = NULL;
++ int wait = 0;
+ int ret = 0;
+
+ mutex_lock(&probe_enable_lock);
+@@ -314,10 +316,7 @@ disable_trace_probe(struct trace_probe *
+ }
+
+ rcu_assign_pointer(tp->files, new);
+-
+- /* Make sure the probe is done with old files */
+- synchronize_sched();
+- kfree(old);
++ wait = 1;
+ } else
+ tp->flags &= ~TP_FLAG_PROFILE;
+
+@@ -326,11 +325,25 @@ disable_trace_probe(struct trace_probe *
+ disable_kretprobe(&tp->rp);
+ else
+ disable_kprobe(&tp->rp.kp);
++ wait = 1;
+ }
+
+ out_unlock:
+ mutex_unlock(&probe_enable_lock);
+
++ if (wait) {
++ /*
++ * Synchronize with kprobe_trace_func/kretprobe_trace_func
++ * to ensure disabled (all running handlers are finished).
++ * This is not only for kfree(), but also the caller,
++ * trace_remove_event_call() supposes it for releasing
++ * event_call related objects, which will be accessed in
++ * the kprobe_trace_func/kretprobe_trace_func.
++ */
++ synchronize_sched();
++ kfree(old); /* Ignored if link == NULL */
++ }
++
+ return ret;
+ }
+