gianfar-do-not-advertise-any-alarm-capability.patch
tty-fix-up-atime-mtime-mess-take-three.patch
fbcon-when-font-is-freed-clear-also-vc_font.data.patch
+tracing-use-stack-of-calling-function-for-stack-tracer.patch
+tracing-fix-stack-tracer-with-fentry-use.patch
+tracing-remove-most-or-all-of-stack-tracer-stack-size-from-stack_max_size.patch
+tracing-fix-off-by-one-on-allocating-stat-pages.patch
+tracing-check-return-value-of-tracing_init_dentry.patch
+tracing-reset-ftrace_graph_filter_enabled-if-count-is-zero.patch
--- /dev/null
+From ed6f1c996bfe4b6e520cf7a74b51cd6988d84420 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung.kim@lge.com>
+Date: Wed, 10 Apr 2013 09:18:12 +0900
+Subject: tracing: Check return value of tracing_init_dentry()
+
+From: Namhyung Kim <namhyung.kim@lge.com>
+
+commit ed6f1c996bfe4b6e520cf7a74b51cd6988d84420 upstream.
+
+Check return value and bail out if it's NULL.
+
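+A sketch of the guard added at each call site (the comment is
+editorial, not part of the patch); returning 0 simply skips creating
+the files rather than flagging the initcall as failed:
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;	/* debugfs unavailable: nothing to create */
+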
+Link: http://lkml.kernel.org/r/1365553093-10180-2-git-send-email-namhyung@kernel.org
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Namhyung Kim <namhyung.kim@lge.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 2 ++
+ kernel/trace/trace_stack.c | 2 ++
+ kernel/trace/trace_stat.c | 2 ++
+ 3 files changed, 6 insertions(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4885,6 +4885,8 @@ static __init int tracer_init_debugfs(vo
+ trace_access_lock_init();
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("trace_options", 0644, d_tracer,
+ NULL, &tracing_iter_fops);
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -431,6 +431,8 @@ static __init int stack_trace_init(void)
+ struct dentry *d_tracer;
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("stack_max_size", 0644, d_tracer,
+ &max_stack_size, &stack_max_size_fops);
+--- a/kernel/trace/trace_stat.c
++++ b/kernel/trace/trace_stat.c
+@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
+ struct dentry *d_tracing;
+
+ d_tracing = tracing_init_dentry();
++ if (!d_tracing)
++ return 0;
+
+ stat_dir = debugfs_create_dir("trace_stat", d_tracing);
+ if (!stat_dir)
--- /dev/null
+From 39e30cd1537937d3c00ef87e865324e981434e5b Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung.kim@lge.com>
+Date: Mon, 1 Apr 2013 21:46:24 +0900
+Subject: tracing: Fix off-by-one on allocating stat->pages
+
+From: Namhyung Kim <namhyung.kim@lge.com>
+
+commit 39e30cd1537937d3c00ef87e865324e981434e5b upstream.
+
+The first page was allocated separately, so no need to start from 0.
+
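+To illustrate (a sketch, not the literal kernel code): with the head
+page already allocated, N pages need only N-1 further allocations,
+so the loop must start at 1:
+
+	pg = stat->pages;	/* page 0 was allocated before the loop */
+	for (i = 1; i < pages; i++) {
+		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+		...
+	}
+
+Starting the loop at 0 allocated one page more than was accounted for.
+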
+Link: http://lkml.kernel.org/r/1364820385-32027-2-git-send-email-namhyung@kernel.org
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Namhyung Kim <namhyung.kim@lge.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -650,7 +650,7 @@ int ftrace_profile_pages_init(struct ftr
+
+ pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+- for (i = 0; i < pages; i++) {
++ for (i = 1; i < pages; i++) {
+ pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pg->next)
+ goto out_free;
--- /dev/null
+From d4ecbfc49b4b1d4b597fb5ba9e4fa25d62f105c5 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Wed, 13 Mar 2013 21:25:35 -0400
+Subject: tracing: Fix stack tracer with fentry use
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit d4ecbfc49b4b1d4b597fb5ba9e4fa25d62f105c5 upstream.
+
+When gcc 4.6 on x86 is used, the function tracer will use the new
+option -mfentry, which does a call to "fentry" at the start of every
+function instead of "mcount". The significance of this is that fentry
+is called as the very first operation of the function, whereas mcount
+is called only after the function's stack frame has been set up.
+
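+Roughly, the difference looks like this (a simplified illustration,
+not actual compiler output):
+
+	func:			/* mcount: frame exists at the call */
+		<set up stack frame>
+		call mcount
+
+	func:			/* fentry: no frame exists yet */
+		call __fentry__
+		<set up stack frame>
+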
+This causes the stack tracer to show some bogus results for the size
+of the last function traced, as well as showing "ftrace_call" instead
+of the function. This is due to the stack frame not being set up
+by the function that is about to be traced.
+
+ # cat stack_trace
+ Depth Size Location (48 entries)
+ ----- ---- --------
+ 0) 4824 216 ftrace_call+0x5/0x2f
+ 1) 4608 112 ____cache_alloc+0xb7/0x22d
+ 2) 4496 80 kmem_cache_alloc+0x63/0x12f
+
+The 216 size for ftrace_call includes both the ftrace_call stack
+(which includes the registers it saves) and the stack size of the
+parent.
+
+To fix this, if CC_USING_FENTRY is defined, the stack tracer will
+reserve the first item in the stack_dump_trace[] array when calling
+save_stack_trace(), and will fill it in with the parent ip. Then
+the code will look for the parent pointer on the stack and report
+the real size of the parent's stack frame:
+
+ # cat stack_trace
+ Depth Size Location (14 entries)
+ ----- ---- --------
+ 0) 2640 48 update_group_power+0x26/0x187
+ 1) 2592 224 update_sd_lb_stats+0x2a5/0x4ac
+ 2) 2368 160 find_busiest_group+0x31/0x1f1
+ 3) 2208 256 load_balance+0xd9/0x662
+
+I'm Cc'ing stable, although it's not urgent: the bug only shows a
+bogus size for item #0, and the rest of the trace is legit. It
+should still be corrected in previous stable releases.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_stack.c | 33 +++++++++++++++++++++++++++++----
+ 1 file changed, 29 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -20,13 +20,27 @@
+
+ #define STACK_TRACE_ENTRIES 500
+
++/*
++ * If fentry is used, then the function being traced will
++ * jump to fentry directly before it sets up its stack frame.
++ * We need to ignore that one and record the parent. Since
++ * the stack frame for the traced function wasn't set up yet,
++ * the stack_trace won't see the parent. That needs to be added
++ * manually to stack_dump_trace[] as the first element.
++ */
++#ifdef CC_USING_FENTRY
++# define add_func 1
++#else
++# define add_func 0
++#endif
++
+ static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
+ { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
+ static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+
+ static struct stack_trace max_stack_trace = {
+- .max_entries = STACK_TRACE_ENTRIES,
+- .entries = stack_dump_trace,
++ .max_entries = STACK_TRACE_ENTRIES - add_func,
++ .entries = &stack_dump_trace[add_func],
+ };
+
+ static unsigned long max_stack_size;
+@@ -40,7 +54,7 @@ int stack_tracer_enabled;
+ static int last_stack_tracer_enabled;
+
+ static inline void
+-check_stack(unsigned long *stack)
++check_stack(unsigned long ip, unsigned long *stack)
+ {
+ unsigned long this_size, flags;
+ unsigned long *p, *top, *start;
+@@ -71,6 +85,17 @@ check_stack(unsigned long *stack)
+ save_stack_trace(&max_stack_trace);
+
+ /*
++ * When fentry is used, the traced function does not get
++ * its stack frame set up, and we lose the parent.
++ * Add that one in manually. We set up save_stack_trace()
++ * to not touch the first element in this case.
++ */
++ if (add_func) {
++ stack_dump_trace[0] = ip;
++ max_stack_trace.nr_entries++;
++ }
++
++ /*
+ * Now find where in the stack these are.
+ */
+ i = 0;
+@@ -124,7 +149,7 @@ stack_trace_call(unsigned long ip, unsig
+ if (per_cpu(trace_active, cpu)++ != 0)
+ goto out;
+
+- check_stack(&stack);
++ check_stack(parent_ip, &stack);
+
+ out:
+ per_cpu(trace_active, cpu)--;
--- /dev/null
+From 4df297129f622bdc18935c856f42b9ddd18f9f28 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Wed, 13 Mar 2013 23:34:22 -0400
+Subject: tracing: Remove most or all of stack tracer stack size from stack_max_size
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 4df297129f622bdc18935c856f42b9ddd18f9f28 upstream.
+
+Currently, the depth reported in the stack tracer stack_trace file
+does not match the stack_max_size file. This is because stack_max_size
+includes the overhead of the stack tracer itself while the depth does
+not.
+
+The first time a max is triggered, a calculation is now performed that
+figures out the overhead of the stack tracer and subtracts it from
+the stack_max_size variable. The overhead is stored and is subtracted
+from the reported stack size when comparing for a new max.
+
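+For example (numbers purely for illustration): if the first saved
+trace entry is found 27 words above the tracer's own stack pointer
+on x86-64, then
+
+	tracer_frame = 27 * sizeof(unsigned long);	/* 216 bytes */
+
+is pure stack tracer overhead, and it is subtracted from both the
+current measurement and the recorded max_stack_size.
+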
+Now the stack_max_size corresponds to the reported depth:
+
+ # cat stack_max_size
+4640
+
+ # cat stack_trace
+ Depth Size Location (48 entries)
+ ----- ---- --------
+ 0) 4640 32 _raw_spin_lock+0x18/0x24
+ 1) 4608 112 ____cache_alloc+0xb7/0x22d
+ 2) 4496 80 kmem_cache_alloc+0x63/0x12f
+ 3) 4416 16 mempool_alloc_slab+0x15/0x17
+[...]
+
+While testing against an older gcc on x86 that uses mcount instead
+of fentry, I found that passing in ip + MCOUNT_INSN_SIZE let the
+stack trace show one more function deep which was missing before.
+
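+That is because the function tracer reports the address of the mcount
+call itself, while the stack holds the return address just past it:
+
+	call mcount		<-- ip reported by the function tracer
+	<next instruction>	<-- ip + MCOUNT_INSN_SIZE, the value
+				    actually found on the stack
+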
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_stack.c | 75 ++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 54 insertions(+), 21 deletions(-)
+
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -20,27 +20,24 @@
+
+ #define STACK_TRACE_ENTRIES 500
+
+-/*
+- * If fentry is used, then the function being traced will
+- * jump to fentry directly before it sets up its stack frame.
+- * We need to ignore that one and record the parent. Since
+- * the stack frame for the traced function wasn't set up yet,
+- * the stack_trace won't see the parent. That needs to be added
+- * manually to stack_dump_trace[] as the first element.
+- */
+ #ifdef CC_USING_FENTRY
+-# define add_func 1
++# define fentry 1
+ #else
+-# define add_func 0
++# define fentry 0
+ #endif
+
+ static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
+ { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
+ static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+
++/*
++ * Reserve one entry for the passed in ip. This will allow
++ * us to remove most or all of the stack size overhead
++ * added by the stack tracer itself.
++ */
+ static struct stack_trace max_stack_trace = {
+- .max_entries = STACK_TRACE_ENTRIES - add_func,
+- .entries = &stack_dump_trace[add_func],
++ .max_entries = STACK_TRACE_ENTRIES - 1,
++ .entries = &stack_dump_trace[1],
+ };
+
+ static unsigned long max_stack_size;
+@@ -58,10 +55,14 @@ check_stack(unsigned long ip, unsigned l
+ {
+ unsigned long this_size, flags;
+ unsigned long *p, *top, *start;
++ static int tracer_frame;
++ int frame_size = ACCESS_ONCE(tracer_frame);
+ int i;
+
+ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
+ this_size = THREAD_SIZE - this_size;
++ /* Remove the frame of the tracer */
++ this_size -= frame_size;
+
+ if (this_size <= max_stack_size)
+ return;
+@@ -73,6 +74,10 @@ check_stack(unsigned long ip, unsigned l
+ local_irq_save(flags);
+ arch_spin_lock(&max_stack_lock);
+
++ /* In case another CPU set the tracer_frame on us */
++ if (unlikely(!frame_size))
++ this_size -= tracer_frame;
++
+ /* a race could have already updated it */
+ if (this_size <= max_stack_size)
+ goto out;
+@@ -85,15 +90,12 @@ check_stack(unsigned long ip, unsigned l
+ save_stack_trace(&max_stack_trace);
+
+ /*
+- * When fentry is used, the traced function does not get
+- * its stack frame set up, and we lose the parent.
+- * Add that one in manually. We set up save_stack_trace()
+- * to not touch the first element in this case.
++ * Add the passed in ip from the function tracer.
++ * Searching for this on the stack will skip over
++ * most of the overhead from the stack tracer itself.
+ */
+- if (add_func) {
+- stack_dump_trace[0] = ip;
+- max_stack_trace.nr_entries++;
+- }
++ stack_dump_trace[0] = ip;
++ max_stack_trace.nr_entries++;
+
+ /*
+ * Now find where in the stack these are.
+@@ -123,6 +125,18 @@ check_stack(unsigned long ip, unsigned l
+ found = 1;
+ /* Start the search from here */
+ start = p + 1;
++ /*
++ * We do not want to show the overhead
++ * of the stack tracer stack in the
++ * max stack. If we haven't figured
++ * out what that is, then figure it out
++ * now.
++ */
++ if (unlikely(!tracer_frame) && i == 1) {
++ tracer_frame = (p - stack) *
++ sizeof(unsigned long);
++ max_stack_size -= tracer_frame;
++ }
+ }
+ }
+
+@@ -149,7 +163,26 @@ stack_trace_call(unsigned long ip, unsig
+ if (per_cpu(trace_active, cpu)++ != 0)
+ goto out;
+
+- check_stack(parent_ip, &stack);
++ /*
++ * When fentry is used, the traced function does not get
++ * its stack frame set up, and we lose the parent.
++ * The ip is pretty useless because the function tracer
++ * was called before that function set up its stack frame.
++ * In this case, we use the parent ip.
++ *
++ * By adding the return address of either the parent ip
++ * or the current ip we can disregard most of the stack usage
++ * caused by the stack tracer itself.
++ *
++ * The function tracer always reports the address of where the
++ * mcount call was, but the stack will hold the return address.
++ */
++ if (fentry)
++ ip = parent_ip;
++ else
++ ip += MCOUNT_INSN_SIZE;
++
++ check_stack(ip, &stack);
+
+ out:
+ per_cpu(trace_active, cpu)--;
--- /dev/null
+From 9f50afccfdc15d95d7331acddcb0f7703df089ae Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung.kim@lge.com>
+Date: Thu, 11 Apr 2013 16:01:38 +0900
+Subject: tracing: Reset ftrace_graph_filter_enabled if count is zero
+
+From: Namhyung Kim <namhyung.kim@lge.com>
+
+commit 9f50afccfdc15d95d7331acddcb0f7703df089ae upstream.
+
+The ftrace_graph_count can be decreased with a "!" pattern, so the
+enabled flag should be updated too.
+
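+For example (an illustrative session; do_sys_open stands in for any
+filtered function):
+
+	# echo do_sys_open > set_graph_function	(count 0 -> 1)
+	# echo '!do_sys_open' >> set_graph_function	(count 1 -> 0)
+
+Without this fix, ftrace_graph_filter_enabled stays set while the
+filter list is empty, so the graph tracer traces nothing instead of
+falling back to tracing all functions.
+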
+Link: http://lkml.kernel.org/r/1365663698-2413-1-git-send-email-namhyung@kernel.org
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Namhyung Kim <namhyung.kim@lge.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3714,7 +3714,8 @@ out:
+ if (fail)
+ return -EINVAL;
+
+- ftrace_graph_filter_enabled = 1;
++ ftrace_graph_filter_enabled = !!(*idx);
++
+ return 0;
+ }
+
--- /dev/null
+From 87889501d0adfae10e3b0f0e6f2d7536eed9ae84 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Wed, 13 Mar 2013 20:43:57 -0400
+Subject: tracing: Use stack of calling function for stack tracer
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 87889501d0adfae10e3b0f0e6f2d7536eed9ae84 upstream.
+
+Use the stack of stack_trace_call() instead of check_stack() as
+the test pointer for the max stack size. It makes the code a bit
+cleaner and the measurement a little more accurate.
+
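+The size estimate itself is unchanged (a sketch, equivalent to the
+two lines in the diff below):
+
+	/* bytes in use between the object and the top of the stack */
+	this_size = THREAD_SIZE -
+			((unsigned long)stack & (THREAD_SIZE - 1));
+
+Measuring from a local in stack_trace_call() rather than one inside
+check_stack() keeps whatever frame check_stack() may get (inline is
+only a hint) out of the measurement.
+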
+Adding stable, as a later fix depends on this patch.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_stack.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -39,20 +39,21 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
+ int stack_tracer_enabled;
+ static int last_stack_tracer_enabled;
+
+-static inline void check_stack(void)
++static inline void
++check_stack(unsigned long *stack)
+ {
+ unsigned long this_size, flags;
+ unsigned long *p, *top, *start;
+ int i;
+
+- this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
++ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
+ this_size = THREAD_SIZE - this_size;
+
+ if (this_size <= max_stack_size)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+- if (!object_is_on_stack(&this_size))
++ if (!object_is_on_stack(stack))
+ return;
+
+ local_irq_save(flags);
+@@ -73,7 +74,7 @@ static inline void check_stack(void)
+ * Now find where in the stack these are.
+ */
+ i = 0;
+- start = &this_size;
++ start = stack;
+ top = (unsigned long *)
+ (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
+
+@@ -113,6 +114,7 @@ static void
+ stack_trace_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
+ {
++ unsigned long stack;
+ int cpu;
+
+ preempt_disable_notrace();
+@@ -122,7 +124,7 @@ stack_trace_call(unsigned long ip, unsig
+ if (per_cpu(trace_active, cpu)++ != 0)
+ goto out;
+
+- check_stack();
++ check_stack(&stack);
+
+ out:
+ per_cpu(trace_active, cpu)--;