From: Greg Kroah-Hartman
Date: Mon, 2 Dec 2013 17:27:26 +0000 (-0800)
Subject: 3.4-stable patches
X-Git-Tag: v3.4.72~6
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a03eb47cac8a58dfe1ec79f372442d451e1490c1;p=thirdparty%2Fkernel%2Fstable-queue.git

3.4-stable patches

added patches:
	ftrace-fix-function-graph-with-loading-of-modules.patch
	media-lirc_zilog-don-t-use-dynamic-static-allocation.patch
---

diff --git a/queue-3.4/ftrace-fix-function-graph-with-loading-of-modules.patch b/queue-3.4/ftrace-fix-function-graph-with-loading-of-modules.patch
new file mode 100644
index 00000000000..9b5c21d6a2a
--- /dev/null
+++ b/queue-3.4/ftrace-fix-function-graph-with-loading-of-modules.patch
@@ -0,0 +1,250 @@
+From 8a56d7761d2d041ae5e8215d20b4167d8aa93f51 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)"
+Date: Mon, 25 Nov 2013 20:59:46 -0500
+Subject: ftrace: Fix function graph with loading of modules
+
+From: "Steven Rostedt (Red Hat)"
+
+commit 8a56d7761d2d041ae5e8215d20b4167d8aa93f51 upstream.
+
+Commit 8c4f3c3fa9681 "ftrace: Check module functions being traced on reload"
+fixed module loading and unloading with respect to function tracing, but
+it missed the function graph tracer. If you perform the following
+
+ # cd /sys/kernel/debug/tracing
+ # echo function_graph > current_tracer
+ # modprobe nfsd
+ # echo nop > current_tracer
+
+You'll get the following oops message:
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 2 PID: 2910 at /linux.git/kernel/trace/ftrace.c:1640 __ftrace_hash_rec_update.part.35+0x168/0x1b9()
+ Modules linked in: nfsd exportfs nfs_acl lockd ipt_MASQUERADE sunrpc ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables uinput snd_hda_codec_idt
+ CPU: 2 PID: 2910 Comm: bash Not tainted 3.13.0-rc1-test #7
+ Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007
+  0000000000000668 ffff8800787efcf8 ffffffff814fe193 ffff88007d500000
+  0000000000000000 ffff8800787efd38 ffffffff8103b80a 0000000000000668
+  ffffffff810b2b9a ffffffff81a48370 0000000000000001 ffff880037aea000
+ Call Trace:
+  [] dump_stack+0x4f/0x7c
+  [] warn_slowpath_common+0x81/0x9b
+  [] ? __ftrace_hash_rec_update.part.35+0x168/0x1b9
+  [] warn_slowpath_null+0x1a/0x1c
+  [] __ftrace_hash_rec_update.part.35+0x168/0x1b9
+  [] ? __mutex_lock_slowpath+0x364/0x364
+  [] ftrace_shutdown+0xd7/0x12b
+  [] unregister_ftrace_graph+0x49/0x78
+  [] graph_trace_reset+0xe/0x10
+  [] tracing_set_tracer+0xa7/0x26a
+  [] tracing_set_trace_write+0x8b/0xbd
+  [] ? ftrace_return_to_handler+0xb2/0xde
+  [] ? __sb_end_write+0x5e/0x5e
+  [] vfs_write+0xab/0xf6
+  [] ftrace_graph_caller+0x85/0x85
+  [] SyS_write+0x59/0x82
+  [] ftrace_graph_caller+0x85/0x85
+  [] system_call_fastpath+0x16/0x1b
+ ---[ end trace 940358030751eafb ]---
+
+The above mentioned commit didn't go far enough. It covered the function
+tracer by adding checks in __register_ftrace_function(), but the function
+graph tracer circumvents that path (for a slight efficiency gain when the
+function graph tracer runs together with a function tracer; the gain was
+not worth this).
+
+The problem came with ftrace_startup(), which should always be called
+after __register_ftrace_function() if you want this bug to be completely
+fixed.
+
+This solution moves __register_ftrace_function() inside of
+ftrace_startup() (and, symmetrically, __unregister_ftrace_function()
+inside of ftrace_shutdown()) and removes the need to call the two
+functions separately.
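+
+To make the new contract concrete, here is a minimal sketch (illustrative
+C only, simplified from the register_ftrace_function() hunk below; "ops"
+stands for any struct ftrace_ops):
+
+	/* Before: two calls, with the caller unwinding on failure. */
+	ret = __register_ftrace_function(ops);
+	if (!ret)
+		ret = ftrace_startup(ops, 0);
+
+	/* After: one call; ftrace_startup() registers the ops itself
+	 * and returns -ENODEV when ftrace is disabled.
+	 */
+	ret = ftrace_startup(ops, 0);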
+
+Reported-by: Dave Wysochanski
+Fixes: ed926f9b35cd ("ftrace: Use counters to enable functions to trace")
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ kernel/trace/ftrace.c |   68 +++++++++++++++++++++++++-------------------------
+ 1 file changed, 34 insertions(+), 34 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -312,9 +312,6 @@ static int remove_ftrace_list_ops(struct
+ 
+ static int __register_ftrace_function(struct ftrace_ops *ops)
+ {
+-	if (ftrace_disabled)
+-		return -ENODEV;
+-
+ 	if (FTRACE_WARN_ON(ops == &global_ops))
+ 		return -EINVAL;
+ 
+@@ -348,9 +345,6 @@ static int __unregister_ftrace_function(
+ {
+ 	int ret;
+ 
+-	if (ftrace_disabled)
+-		return -ENODEV;
+-
+ 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
+ 		return -EBUSY;
+ 
+@@ -1940,10 +1934,15 @@ static void ftrace_startup_enable(int co
+ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ {
+ 	bool hash_enable = true;
++	int ret;
+ 
+ 	if (unlikely(ftrace_disabled))
+ 		return -ENODEV;
+ 
++	ret = __register_ftrace_function(ops);
++	if (ret)
++		return ret;
++
+ 	ftrace_start_up++;
+ 	command |= FTRACE_UPDATE_CALLS;
+ 
+@@ -1965,12 +1964,17 @@ static int ftrace_startup(struct ftrace_
+ 	return 0;
+ }
+ 
+-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
++static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ {
+ 	bool hash_disable = true;
++	int ret;
+ 
+ 	if (unlikely(ftrace_disabled))
+-		return;
++		return -ENODEV;
++
++	ret = __unregister_ftrace_function(ops);
++	if (ret)
++		return ret;
+ 
+ 	ftrace_start_up--;
+ 	/*
+@@ -2005,9 +2009,10 @@ static void ftrace_shutdown(struct ftrac
+ 	}
+ 
+ 	if (!command || !ftrace_enabled)
+-		return;
++		return 0;
+ 
+ 	ftrace_run_update_code(command);
++	return 0;
+ }
+ 
+ static void ftrace_startup_sysctl(void)
+@@ -2873,16 +2878,13 @@ static void __enable_ftrace_function_pro
+ 	if (i == FTRACE_FUNC_HASHSIZE)
+ 		return;
+ 
+-	ret = __register_ftrace_function(&trace_probe_ops);
+-	if (!ret)
+-		ret = ftrace_startup(&trace_probe_ops, 0);
++	ret = ftrace_startup(&trace_probe_ops, 0);
+ 
+ 	ftrace_probe_registered = 1;
+ }
+ 
+ static void __disable_ftrace_function_probe(void)
+ {
+-	int ret;
+ 	int i;
+ 
+ 	if (!ftrace_probe_registered)
+@@ -2895,9 +2897,7 @@ static void __disable_ftrace_function_pr
+ 	}
+ 
+ 	/* no more funcs left */
+-	ret = __unregister_ftrace_function(&trace_probe_ops);
+-	if (!ret)
+-		ftrace_shutdown(&trace_probe_ops, 0);
++	ftrace_shutdown(&trace_probe_ops, 0);
+ 
+ 	ftrace_probe_registered = 0;
+ }
+@@ -3948,12 +3948,15 @@ device_initcall(ftrace_nodyn_init);
+ static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+ static inline void ftrace_startup_enable(int command) { }
+ /* Keep as macros so we do not need to define the commands */
+-# define ftrace_startup(ops, command)			\
+-	({						\
+-		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
+-		0;					\
++# define ftrace_startup(ops, command)					\
++	({								\
++		int ___ret = __register_ftrace_function(ops);		\
++		if (!___ret)						\
++			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
++		___ret;							\
+ 	})
+-# define ftrace_shutdown(ops, command)	do { } while (0)
++# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
++
+ # define ftrace_startup_sysctl()	do { } while (0)
+ # define ftrace_shutdown_sysctl()	do { } while (0)
+ 
+@@ -4323,15 +4326,8 @@ int register_ftrace_function(struct ftra
+ 
+ 	mutex_lock(&ftrace_lock);
+ 
+-	if (unlikely(ftrace_disabled))
+-		goto out_unlock;
+-
+-	ret = __register_ftrace_function(ops);
+-	if (!ret)
+-		ret = ftrace_startup(ops, 0);
++	ret = ftrace_startup(ops, 0);
+ 
+-
+- out_unlock:
+ 	mutex_unlock(&ftrace_lock);
+ 	return ret;
+ }
+@@ -4348,9 +4344,7 @@ int unregister_ftrace_function(struct ft
+ 	int ret;
+ 
+ 	mutex_lock(&ftrace_lock);
+-	ret = __unregister_ftrace_function(ops);
+-	if (!ret)
+-		ftrace_shutdown(ops, 0);
++	ret = ftrace_shutdown(ops, 0);
+ 	mutex_unlock(&ftrace_lock);
+ 
+ 	return ret;
+@@ -4544,6 +4538,12 @@ ftrace_suspend_notifier_call(struct noti
+ 	return NOTIFY_DONE;
+ }
+ 
++/* Just a place holder for function graph */
++static struct ftrace_ops fgraph_ops __read_mostly = {
++	.func = ftrace_stub,
++	.flags = FTRACE_OPS_FL_GLOBAL,
++};
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ 			trace_func_graph_ent_t entryfunc)
+ {
+@@ -4570,7 +4570,7 @@ int register_ftrace_gra
+ 	ftrace_graph_return = retfunc;
+ 	ftrace_graph_entry = entryfunc;
+ 
+-	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
++	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
+ 
+ out:
+ 	mutex_unlock(&ftrace_lock);
+@@ -4587,7 +4587,7 @@
+ 	ftrace_graph_active--;
+ 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ 	ftrace_graph_entry = ftrace_graph_entry_stub;
+-	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
++	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
+ 	unregister_pm_notifier(&ftrace_suspend_notifier);
+ 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+ 
diff --git a/queue-3.4/media-lirc_zilog-don-t-use-dynamic-static-allocation.patch b/queue-3.4/media-lirc_zilog-don-t-use-dynamic-static-allocation.patch
new file mode 100644
index 00000000000..2520dd2cc00
--- /dev/null
+++ b/queue-3.4/media-lirc_zilog-don-t-use-dynamic-static-allocation.patch
@@ -0,0 +1,52 @@
+From ac5b4b6bf0c84c48d7e2e3fce22e35b04282ba76 Mon Sep 17 00:00:00 2001
+From: Mauro Carvalho Chehab
+Date: Sat, 2 Nov 2013 08:16:47 -0300
+Subject: media: lirc_zilog: Don't use dynamic static allocation
+
+From: Mauro Carvalho Chehab
+
+commit ac5b4b6bf0c84c48d7e2e3fce22e35b04282ba76 upstream.
+
+Dynamic static allocation is evil, as the kernel stack is small and
+compilation complains about it on some archs:
+
+	drivers/staging/media/lirc/lirc_zilog.c:967:1: warning: 'read' uses dynamic stack allocation [enabled by default]
+
+Instead, let's enforce a limit of 64 bytes for the buffer. That should
+be more than enough.
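+
+As a minimal sketch of the pattern (plain C with a made-up copy_chunk()
+helper; the real change is in the read() hunk below):
+
+	#define MAX_XFER_SIZE 64	/* fixed upper bound for the buffer */
+
+	static int copy_chunk(unsigned char *dst, const unsigned char *src,
+			      size_t chunk_size)
+	{
+		/* Was: unsigned char buf[chunk_size]; -- a VLA whose stack
+		 * footprint depends on run-time data.
+		 */
+		unsigned char buf[MAX_XFER_SIZE];
+
+		if (chunk_size > sizeof(buf))	/* reject oversized chunks */
+			return -EINVAL;
+		memcpy(buf, src, chunk_size);
+		memcpy(dst, buf, chunk_size);
+		return 0;
+	}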
+
+Signed-off-by: Mauro Carvalho Chehab
+Reviewed-by: Hans Verkuil
+Signed-off-by: Mauro Carvalho Chehab
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/staging/media/lirc/lirc_zilog.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/media/lirc/lirc_zilog.c
++++ b/drivers/staging/media/lirc/lirc_zilog.c
+@@ -61,6 +61,9 @@
+ #include
+ #include
+ 
++/* Max transfer size done by I2C transfer functions */
++#define MAX_XFER_SIZE 64
++
+ struct IR;
+ 
+ struct IR_rx {
+@@ -942,7 +945,14 @@ static ssize_t read(struct file *filep,
+ 			schedule();
+ 			set_current_state(TASK_INTERRUPTIBLE);
+ 		} else {
+-			unsigned char buf[rbuf->chunk_size];
++			unsigned char buf[MAX_XFER_SIZE];
++
++			if (rbuf->chunk_size > sizeof(buf)) {
++				zilog_error("chunk_size is too big (%d)!\n",
++					    rbuf->chunk_size);
++				ret = -EINVAL;
++				break;
++			}
+ 			m = lirc_buffer_read(rbuf, buf);
+ 			if (m == rbuf->chunk_size) {
+ 				ret = copy_to_user((void *)outbuf+written, buf,
diff --git a/queue-3.4/series b/queue-3.4/series
index 1bcaeb534d1..82bc49b9b53 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -54,3 +54,5 @@ tracing-allow-events-to-have-null-strings.patch
 input-i8042-add-pnp-modaliases.patch
 kvm-perform-an-invalid-memslot-step-for-gpa-base-change.patch
 kvm-fix-iommu-map-unmap-to-handle-memory-slot-moves.patch
+ftrace-fix-function-graph-with-loading-of-modules.patch
+media-lirc_zilog-don-t-use-dynamic-static-allocation.patch