--- /dev/null
+From a949ae560a511fe4e3adf48fa44fefded93e5c2b Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Thu, 24 Apr 2014 10:40:12 -0400
+Subject: ftrace/module: Hardcode ftrace_module_init() call into load_module()
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit a949ae560a511fe4e3adf48fa44fefded93e5c2b upstream.
+
+A race exists between module loading and enabling of function tracer.
+
+ CPU 1 CPU 2
+ ----- -----
+ load_module()
+ module->state = MODULE_STATE_COMING
+
+ register_ftrace_function()
+ mutex_lock(&ftrace_lock);
+ ftrace_startup()
+ update_ftrace_function();
+ ftrace_arch_code_modify_prepare()
+ set_all_module_text_rw();
+ <enables-ftrace>
+ ftrace_arch_code_modify_post_process()
+ set_all_module_text_ro();
+
+ [ here all module text is set to RO,
+ including the module that is
+ loading!! ]
+
+ blocking_notifier_call_chain(MODULE_STATE_COMING);
+ ftrace_init_module()
+
+ [ tries to modify code, but it's RO, and fails!
+ ftrace_bug() is called]
+
+When this race happens, ftrace_bug() will produce a nasty warning and
+all of the function tracing features will be disabled until reboot.
+
+The simple solution is to treat module load the same way the core
+kernel is treated at boot: hardcode the ftrace function modification
+of converting calls to mcount into nops. As this is done in init/main.c,
+there's no reason it could not be done in load_module(). This gives
+better control over the changes and doesn't tie the state of the
+module to its notifiers as much. Ftrace is special; it needs to be
+treated as such.
+
+The reason this works is that ftrace_module_init() is called while the
+module is still in MODULE_STATE_UNFORMED, which is ignored by the
+set_all_module_text_ro() call, as sketched below.
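+
+For reference, a simplified sketch of the helper that flips module text
+back to read-only (set_all_modules_text_ro() in kernel/module.c of this
+kernel generation; the field and helper names are quoted from memory,
+so treat the details as an approximation, not as part of this patch):
+
+	void set_all_modules_text_ro(void)
+	{
+		struct module *mod;
+
+		mutex_lock(&module_mutex);
+		list_for_each_entry_rcu(mod, &modules, list) {
+			/* A module that is still loading is left alone */
+			if (mod->state == MODULE_STATE_UNFORMED)
+				continue;
+			if (mod->module_core && mod->core_text_size)
+				set_page_attributes(mod->module_core,
+						    mod->module_core +
+						    mod->core_text_size,
+						    set_memory_ro);
+			/* (the real helper also covers the init text) */
+		}
+		mutex_unlock(&module_mutex);
+	}
+
+Because ftrace_module_init() now runs while the module is still in
+MODULE_STATE_UNFORMED, a concurrent set_all_module_text_ro() on another
+CPU skips it, so the conversion of mcount calls to nops can no longer
+race with the module's text being flipped read-only underneath it.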
+
+Link: http://lkml.kernel.org/r/1395637826-3312-1-git-send-email-indou.takao@jp.fujitsu.com
+
+Reported-by: Takao Indoh <indou.takao@jp.fujitsu.com>
+Acked-by: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/ftrace.h | 2 ++
+ kernel/module.c | 3 +++
+ kernel/trace/ftrace.c | 27 ++++-----------------------
+ 3 files changed, 9 insertions(+), 23 deletions(-)
+
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -524,6 +524,7 @@ static inline int ftrace_modify_call(str
+ extern int ftrace_arch_read_dyn_info(char *buf, int size);
+
+ extern int skip_trace(unsigned long ip);
++extern void ftrace_module_init(struct module *mod);
+
+ extern void ftrace_disable_daemon(void);
+ extern void ftrace_enable_daemon(void);
+@@ -533,6 +534,7 @@ static inline int ftrace_force_update(vo
+ static inline void ftrace_disable_daemon(void) { }
+ static inline void ftrace_enable_daemon(void) { }
+ static inline void ftrace_release_mod(struct module *mod) {}
++static inline void ftrace_module_init(struct module *mod) {}
+ static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
+ {
+ return -EINVAL;
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3265,6 +3265,9 @@ static int load_module(struct load_info
+
+ dynamic_debug_setup(info->debug, info->num_debug);
+
++ /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
++ ftrace_module_init(mod);
++
+ /* Finally it's fully formed, ready to start executing. */
+ err = complete_formation(mod, info);
+ if (err)
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4315,16 +4315,11 @@ static void ftrace_init_module(struct mo
+ ftrace_process_locs(mod, start, end);
+ }
+
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
++void ftrace_module_init(struct module *mod)
+ {
+- struct module *mod = data;
+-
+- if (val == MODULE_STATE_COMING)
+- ftrace_init_module(mod, mod->ftrace_callsites,
+- mod->ftrace_callsites +
+- mod->num_ftrace_callsites);
+- return 0;
++ ftrace_init_module(mod, mod->ftrace_callsites,
++ mod->ftrace_callsites +
++ mod->num_ftrace_callsites);
+ }
+
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+@@ -4338,11 +4333,6 @@ static int ftrace_module_notify_exit(str
+ return 0;
+ }
+ #else
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
+-{
+- return 0;
+-}
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+ unsigned long val, void *data)
+ {
+@@ -4350,11 +4340,6 @@ static int ftrace_module_notify_exit(str
+ }
+ #endif /* CONFIG_MODULES */
+
+-struct notifier_block ftrace_module_enter_nb = {
+- .notifier_call = ftrace_module_notify_enter,
+- .priority = INT_MAX, /* Run before anything that can use kprobes */
+-};
+-
+ struct notifier_block ftrace_module_exit_nb = {
+ .notifier_call = ftrace_module_notify_exit,
+ .priority = INT_MIN, /* Run after anything that can remove kprobes */
+@@ -4391,10 +4376,6 @@ void __init ftrace_init(void)
+ __start_mcount_loc,
+ __stop_mcount_loc);
+
+- ret = register_module_notifier(&ftrace_module_enter_nb);
+- if (ret)
+- pr_warning("Failed to register trace ftrace module enter notifier\n");
+-
+ ret = register_module_notifier(&ftrace_module_exit_nb);
+ if (ret)
+ pr_warning("Failed to register trace ftrace module exit notifier\n");
--- /dev/null
+From 397335f004f41e5fcf7a795e94eb3ab83411a17c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 22 May 2014 03:25:39 +0000
+Subject: rtmutex: Fix deadlock detector for real
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 397335f004f41e5fcf7a795e94eb3ab83411a17c upstream.
+
+The current deadlock detection logic does not work reliably due to the
+following early exit path:
+
+ /*
+ * Drop out, when the task has no waiters. Note,
+ * top_waiter can be NULL, when we are in the deboosting
+ * mode!
+ */
+ if (top_waiter && (!task_has_pi_waiters(task) ||
+ top_waiter != task_top_pi_waiter(task)))
+ goto out_unlock_pi;
+
+So this not only exits when the task has no waiters, it also exits
+unconditionally when the current waiter is not the top priority waiter
+of the task.
+
+So in a nested locking scenario, it might abort the lock chain walk
+and therefore miss a potential deadlock.
+
+Simple fix: Continue the chain walk when deadlock detection is
+enabled.
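+
+As an illustration (this concrete scenario is an assumption, not taken
+from the changelog), consider two rt_mutexes L1 and L2 and an ABBA
+deadlock, with the slow path running deadlock detection as e.g. the
+futex PI code requests:
+
+	T1				T2
+	--				--
+	rt_mutex_lock(&L1);		rt_mutex_lock(&L2);
+	rt_mutex_lock(&L2);		rt_mutex_lock(&L1);
+	  [ blocks, owner is T2 ]	  [ blocks, owner is T1 -> cycle ]
+
+If a third, higher priority task is already waiting on L1, then T2's
+waiter is not the top pi waiter of T1, and the old check aborted the
+chain walk started for T2 before it ever reached the L2 -> T1 -> L1
+cycle. With this patch, the "not the top pi waiter" exit is only taken
+when deadlock detection is off.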
+
+We also avoid the whole enqueue if we detect the deadlock right away
+(A-A). It's an optimization, but it also prevents another waiter, who
+comes in after the detection and before the task has undone the
+damage, from observing the situation, detecting the deadlock itself
+and returning -EDEADLOCK, which would be wrong as that other task is
+not in a deadlock situation.
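+
+A minimal sketch of the A-A case (illustration only; the entry point
+taking a detect_deadlock argument is assumed from the rtmutex API of
+this kernel generation):
+
+	static DEFINE_RT_MUTEX(lock);
+
+	static int aa_deadlock_demo(void)
+	{
+		rt_mutex_lock(&lock);
+		/*
+		 * Same task, same lock: owner == task in
+		 * task_blocks_on_rt_mutex(), so with deadlock detection
+		 * enabled this now fails with -EDEADLK right away instead
+		 * of enqueueing the task as a waiter on the lock it
+		 * already owns.
+		 */
+		return rt_mutex_lock_interruptible(&lock, 1 /* detect_deadlock */);
+	}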
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
+Link: http://lkml.kernel.org/r/20140522031949.725272460@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/rtmutex.c | 32 ++++++++++++++++++++++++++++----
+ 1 file changed, 28 insertions(+), 4 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -331,9 +331,16 @@ static int rt_mutex_adjust_prio_chain(st
+ * top_waiter can be NULL, when we are in the deboosting
+ * mode!
+ */
+- if (top_waiter && (!task_has_pi_waiters(task) ||
+- top_waiter != task_top_pi_waiter(task)))
+- goto out_unlock_pi;
++ if (top_waiter) {
++ if (!task_has_pi_waiters(task))
++ goto out_unlock_pi;
++ /*
++ * If deadlock detection is off, we stop here if we
++ * are not the top pi waiter of the task.
++ */
++ if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
++ goto out_unlock_pi;
++ }
+
+ /*
+ * When deadlock detection is off then we check, if further
+@@ -349,7 +356,12 @@ static int rt_mutex_adjust_prio_chain(st
+ goto retry;
+ }
+
+- /* Deadlock detection */
++ /*
++ * Deadlock detection. If the lock is the same as the original
++ * lock which caused us to walk the lock chain or if the
++ * current lock is owned by the task which initiated the chain
++ * walk, we detected a deadlock.
++ */
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+ debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+ raw_spin_unlock(&lock->wait_lock);
+@@ -515,6 +527,18 @@ static int task_blocks_on_rt_mutex(struc
+ unsigned long flags;
+ int chain_walk = 0, res;
+
++ /*
++ * Early deadlock detection. We really don't want the task to
++ * enqueue on itself just to untangle the mess later. It's not
++ * only an optimization. We drop the locks, so another waiter
++ * can come in before the chain walk detects the deadlock. So
++ * the other will detect the deadlock and return -EDEADLOCK,
++ * which is wrong, as the other waiter is not in a deadlock
++ * situation.
++ */
++ if (detect_deadlock && owner == task)
++ return -EDEADLK;
++
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ __rt_mutex_adjust_prio(task);
+ waiter->task = task;