From: Greg Kroah-Hartman
Date: Thu, 22 Aug 2013 22:58:15 +0000 (-0700)
Subject: 3.10-stable patches
X-Git-Tag: v3.0.94~14
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7b3ae8a3242118a450f26921a6a491385d4d50d9;p=thirdparty%2Fkernel%2Fstable-queue.git

3.10-stable patches

added patches:
      ftrace-add-check-for-null-regs-if-ops-has-save_regs-set.patch
      tracing-change-tracing_buffers_fops-to-rely-on-tracing_get_cpu.patch
      tracing-change-tracing_entries_fops-to-rely-on-tracing_get_cpu.patch
      tracing-change-tracing_fops-snapshot_fops-to-rely-on-tracing_get_cpu.patch
      tracing-change-tracing_pipe_fops-to-rely-on-tracing_get_cpu.patch
      tracing-change-tracing_stats_fops-to-rely-on.patch
      tracing-introduce-trace_create_cpu_file-and.patch
      tracing-turn-event-id-i_private-into-call-event.type.patch
---
diff --git a/queue-3.10/ftrace-add-check-for-null-regs-if-ops-has-save_regs-set.patch b/queue-3.10/ftrace-add-check-for-null-regs-if-ops-has-save_regs-set.patch
new file mode 100644
index 00000000000..3d278b0fe5e
--- /dev/null
+++ b/queue-3.10/ftrace-add-check-for-null-regs-if-ops-has-save_regs-set.patch
@@ -0,0 +1,84 @@
+From 195a8afc7ac962f8da795549fe38e825f1372b0d Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)"
+Date: Tue, 23 Jul 2013 22:06:15 -0400
+Subject: ftrace: Add check for NULL regs if ops has SAVE_REGS set
+
+From: "Steven Rostedt (Red Hat)"
+
+commit 195a8afc7ac962f8da795549fe38e825f1372b0d upstream.
+
+If a ftrace ops is registered with the SAVE_REGS flag set, and there's
+already a ops registered to one of its functions but without the
+SAVE_REGS flag, there's a small race window where the SAVE_REGS ops gets
+added to the list of callbacks to call for that function before the
+callback trampoline gets set to save the regs.
+
+The problem is, the function is not currently saving regs, which opens
+a small race window where the ops that is expecting regs to be passed
+to it, wont. This can cause a crash if the callback were to reference
+the regs, as the SAVE_REGS guarantees that regs will be set.
+
+To fix this, we add a check in the loop case where it checks if the ops
+has the SAVE_REGS flag set, and if so, it will ignore it if regs is
+not set.
+
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/ftrace.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1416,12 +1416,22 @@ ftrace_hash_move(struct ftrace_ops *ops,
+ * the hashes are freed with call_rcu_sched().
+ */
+ static int
+-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
++ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
+ {
+ struct ftrace_hash *filter_hash;
+ struct ftrace_hash *notrace_hash;
+ int ret;
+
++#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
++ /*
++ * There's a small race when adding ops that the ftrace handler
++ * that wants regs, may be called without them. We can not
++ * allow that handler to be called if regs is NULL.
++ */
++ if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
++ return 0;
++#endif
++
+ filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+ notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+
+@@ -4188,7 +4198,7 @@ static inline void ftrace_startup_enable
+ # define ftrace_shutdown_sysctl() do { } while (0)
+
+ static inline int
+-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
++ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
+ {
+ return 1;
+ }
+@@ -4211,7 +4221,7 @@ ftrace_ops_control_func(unsigned long ip
+ do_for_each_ftrace_op(op, ftrace_control_list) {
+ if (!(op->flags & FTRACE_OPS_FL_STUB) &&
+ !ftrace_function_local_disabled(op) &&
+- ftrace_ops_test(op, ip))
++ ftrace_ops_test(op, ip, regs))
+ op->func(ip, parent_ip, op, regs);
+ } while_for_each_ftrace_op(op);
+ trace_recursion_clear(TRACE_CONTROL_BIT);
+@@ -4244,7 +4254,7 @@ __ftrace_ops_list_func(unsigned long ip,
+ */
+ preempt_disable_notrace();
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+- if (ftrace_ops_test(op, ip))
++ if (ftrace_ops_test(op, ip, regs))
+ op->func(ip, parent_ip, op, regs);
+ } while_for_each_ftrace_op(op);
+ preempt_enable_notrace();
diff --git a/queue-3.10/series b/queue-3.10/series
index 43379e5314c..4418fc4ab9f 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -16,3 +16,11 @@ iwlwifi-bump-required-firmware-api-version-for-3160-7260.patch
 iwlwifi-mvm-adjust-firmware-d3-configuration-api.patch
 tracing-do-not-call-kmem_cache_free-on-allocation-failure.patch
 tracing-kprobe-wait-for-disabling-all-running-kprobe-handlers.patch
+tracing-introduce-trace_create_cpu_file-and.patch
+tracing-change-tracing_pipe_fops-to-rely-on-tracing_get_cpu.patch
+tracing-change-tracing_buffers_fops-to-rely-on-tracing_get_cpu.patch
+tracing-change-tracing_stats_fops-to-rely-on.patch
+tracing-change-tracing_entries_fops-to-rely-on-tracing_get_cpu.patch
+tracing-change-tracing_fops-snapshot_fops-to-rely-on-tracing_get_cpu.patch
+ftrace-add-check-for-null-regs-if-ops-has-save_regs-set.patch
+tracing-turn-event-id-i_private-into-call-event.type.patch
diff --git a/queue-3.10/tracing-change-tracing_buffers_fops-to-rely-on-tracing_get_cpu.patch b/queue-3.10/tracing-change-tracing_buffers_fops-to-rely-on-tracing_get_cpu.patch
new file mode 100644
index 00000000000..326e3642c63
--- /dev/null
+++ b/queue-3.10/tracing-change-tracing_buffers_fops-to-rely-on-tracing_get_cpu.patch
@@ -0,0 +1,67 @@
+From 46ef2be0d1d5ccea0c41bb606143586daadd537c Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Tue, 23 Jul 2013 17:26:00 +0200
+Subject: tracing: Change tracing_buffers_fops to rely on tracing_get_cpu()
+
+From: Oleg Nesterov
+
+commit 46ef2be0d1d5ccea0c41bb606143586daadd537c upstream.
+
+tracing_buffers_open() is racy, the memory inode->i_private points
+to can be already freed.
+
+Change debugfs_create_file("trace_pipe_raw", data) caller to pass
+"data = tr", tracing_buffers_open() can use tracing_get_cpu().
+
+Change debugfs_create_file("snapshot_raw_fops", data) caller too,
+this file uses tracing_buffers_open/release.
+
+Link: http://lkml.kernel.org/r/20130723152600.GA23720@redhat.com
+
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/trace.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4941,8 +4941,7 @@ static const struct file_operations snap
+
+ static int tracing_buffers_open(struct inode *inode, struct file *filp)
+ {
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
++ struct trace_array *tr = inode->i_private;
+ struct ftrace_buffer_info *info;
+ int ret;
+
+@@ -4961,7 +4960,7 @@ static int tracing_buffers_open(struct i
+ mutex_lock(&trace_types_lock);
+
+ info->iter.tr = tr;
+- info->iter.cpu_file = tc->cpu;
++ info->iter.cpu_file = tracing_get_cpu(inode);
+ info->iter.trace = tr->current_trace;
+ info->iter.trace_buffer = &tr->trace_buffer;
+ info->spare = NULL;
+@@ -5568,7 +5567,7 @@ tracing_init_debugfs_percpu(struct trace
+ &data->trace_cpu, cpu, &tracing_fops);
+
+ trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+- &data->trace_cpu, cpu, &tracing_buffers_fops);
++ tr, cpu, &tracing_buffers_fops);
+
+ trace_create_cpu_file("stats", 0444, d_cpu,
+ &data->trace_cpu, cpu, &tracing_stats_fops);
+@@ -5581,7 +5580,7 @@ tracing_init_debugfs_percpu(struct trace
+ &data->trace_cpu, cpu, &snapshot_fops);
+
+ trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+- &data->trace_cpu, cpu, &snapshot_raw_fops);
++ tr, cpu, &snapshot_raw_fops);
+ #endif
+ }
+
diff --git a/queue-3.10/tracing-change-tracing_entries_fops-to-rely-on-tracing_get_cpu.patch b/queue-3.10/tracing-change-tracing_entries_fops-to-rely-on-tracing_get_cpu.patch
new file mode 100644
index 00000000000..78ef660a31e
--- /dev/null
+++ b/queue-3.10/tracing-change-tracing_entries_fops-to-rely-on-tracing_get_cpu.patch
@@ -0,0 +1,157 @@
+From 0bc392ee46d0fd8e6b678457ef71f074f19a03c5 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Tue, 23 Jul 2013 17:26:06 +0200
+Subject: tracing: Change tracing_entries_fops to rely on tracing_get_cpu()
+
+From: Oleg Nesterov
+
+commit 0bc392ee46d0fd8e6b678457ef71f074f19a03c5 upstream.
+
+tracing_open_generic_tc() is racy, the memory inode->i_private
+points to can be already freed.
+
+1. Change its last user, tracing_entries_fops, to use
+   tracing_*_generic_tr() instead.
+
+2. Change debugfs_create_file("buffer_size_kb", data) callers
+   to pass "data = tr".
+
+3. Change tracing_entries_read() and tracing_entries_write() to
+   use tracing_get_cpu().
+
+4. Kill the no longer used tracing_open_generic_tc() and
+   tracing_release_generic_tc().
+
+Link: http://lkml.kernel.org/r/20130723152606.GA23730@redhat.com
+
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/trace.c | 49 +++++++++++++------------------------------------
+ 1 file changed, 12 insertions(+), 37 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2975,23 +2975,6 @@ int tracing_open_generic_tr(struct inode
+ return 0;
+ }
+
+-int tracing_open_generic_tc(struct inode *inode, struct file *filp)
+-{
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
+-
+- if (tracing_disabled)
+- return -ENODEV;
+-
+- if (trace_array_get(tr) < 0)
+- return -ENODEV;
+-
+- filp->private_data = inode->i_private;
+-
+- return 0;
+-
+-}
+-
+ static int tracing_release(struct inode *inode, struct file *file)
+ {
+ struct seq_file *m = file->private_data;
+@@ -3045,15 +3028,6 @@ static int tracing_release_generic_tr(st
+ return 0;
+ }
+
+-static int tracing_release_generic_tc(struct inode *inode, struct file *file)
+-{
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
+-
+- trace_array_put(tr);
+- return 0;
+-}
+-
+ static int tracing_single_release_tr(struct inode *inode, struct file *file)
+ {
+ struct trace_array *tr = inode->i_private;
+@@ -4374,15 +4348,16 @@ static ssize_t
+ tracing_entries_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+ {
+- struct trace_cpu *tc = filp->private_data;
+- struct trace_array *tr = tc->tr;
++ struct inode *inode = file_inode(filp);
++ struct trace_array *tr = inode->i_private;
++ int cpu = tracing_get_cpu(inode);
+ char buf[64];
+ int r = 0;
+ ssize_t ret;
+
+ mutex_lock(&trace_types_lock);
+
+- if (tc->cpu == RING_BUFFER_ALL_CPUS) {
++ if (cpu == RING_BUFFER_ALL_CPUS) {
+ int cpu, buf_size_same;
+ unsigned long size;
+
+@@ -4409,7 +4384,7 @@ tracing_entries_read(struct file *filp,
+ } else
+ r = sprintf(buf, "X\n");
+ } else
+- r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
++ r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
+
+ mutex_unlock(&trace_types_lock);
+
+@@ -4421,7 +4396,8 @@ static ssize_t
+ tracing_entries_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+ {
+- struct trace_cpu *tc = filp->private_data;
++ struct inode *inode = file_inode(filp);
++ struct trace_array *tr = inode->i_private;
+ unsigned long val;
+ int ret;
+
+@@ -4435,8 +4411,7 @@ tracing_entries_write(struct file *filp,
+
+ /* value is in KB */
+ val <<= 10;
+-
+- ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
++ ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
+ if (ret < 0)
+ return ret;
+
+@@ -4884,11 +4859,11 @@ static const struct file_operations trac
+ };
+
+ static const struct file_operations tracing_entries_fops = {
+- .open = tracing_open_generic_tc,
++ .open = tracing_open_generic_tr,
+ .read = tracing_entries_read,
+ .write = tracing_entries_write,
+ .llseek = generic_file_llseek,
+- .release = tracing_release_generic_tc,
++ .release = tracing_release_generic_tr,
+ };
+
+ static const struct file_operations tracing_total_entries_fops = {
+@@ -5572,7 +5547,7 @@ tracing_init_debugfs_percpu(struct trace
+ tr, cpu, &tracing_stats_fops);
+
+ trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+- &data->trace_cpu, cpu, &tracing_entries_fops);
++ tr, cpu, &tracing_entries_fops);
+
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ trace_create_cpu_file("snapshot", 0644, d_cpu,
+@@ -6148,7 +6123,7 @@ init_tracer_debugfs(struct trace_array *
+ tr, &tracing_pipe_fops);
+
+ trace_create_file("buffer_size_kb", 0644, d_tracer,
+- (void *)&tr->trace_cpu, &tracing_entries_fops);
++ tr, &tracing_entries_fops);
+
+ trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+ tr, &tracing_total_entries_fops);
diff --git a/queue-3.10/tracing-change-tracing_fops-snapshot_fops-to-rely-on-tracing_get_cpu.patch b/queue-3.10/tracing-change-tracing_fops-snapshot_fops-to-rely-on-tracing_get_cpu.patch
new file mode 100644
index 00000000000..17e8eb1346b
--- /dev/null
+++ b/queue-3.10/tracing-change-tracing_fops-snapshot_fops-to-rely-on-tracing_get_cpu.patch
@@ -0,0 +1,191 @@
+From 6484c71cbc170634fa131b6d022d86d61686b88b Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Tue, 23 Jul 2013 17:26:10 +0200
+Subject: tracing: Change tracing_fops/snapshot_fops to rely on tracing_get_cpu()
+
+From: Oleg Nesterov
+
+commit 6484c71cbc170634fa131b6d022d86d61686b88b upstream.
+
+tracing_open() and tracing_snapshot_open() are racy, the memory
+inode->i_private points to can be already freed.
+
+Convert these last users of "inode->i_private == trace_cpu" to
+use "i_private = trace_array" and rely on tracing_get_cpu().
+
+v2: incorporate the fix from Steven, tracing_release() must not
+    blindly dereference file->private_data unless we know that
+    the file was opened for reading.
+
+Link: http://lkml.kernel.org/r/20130723152610.GA23737@redhat.com
+
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/trace.c | 50 ++++++++++++++++++++++----------------------------
+ 1 file changed, 22 insertions(+), 28 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2853,9 +2853,9 @@ static const struct seq_operations trace
+ };
+
+ static struct trace_iterator *
+-__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
+- struct inode *inode, struct file *file, bool snapshot)
++__tracing_open(struct inode *inode, struct file *file, bool snapshot)
+ {
++ struct trace_array *tr = inode->i_private;
+ struct trace_iterator *iter;
+ int cpu;
+
+@@ -2896,8 +2896,8 @@ __tracing_open(struct trace_array *tr, s
+ iter->trace_buffer = &tr->trace_buffer;
+ iter->snapshot = snapshot;
+ iter->pos = -1;
++ iter->cpu_file = tracing_get_cpu(inode);
+ mutex_init(&iter->mutex);
+- iter->cpu_file = tc->cpu;
+
+ /* Notify the tracer early; before we stop tracing. */
+ if (iter->trace && iter->trace->open)
+@@ -2977,22 +2977,18 @@ int tracing_open_generic_tr(struct inode
+
+ static int tracing_release(struct inode *inode, struct file *file)
+ {
++ struct trace_array *tr = inode->i_private;
+ struct seq_file *m = file->private_data;
+ struct trace_iterator *iter;
+- struct trace_array *tr;
+ int cpu;
+
+- /* Writes do not use seq_file, need to grab tr from inode */
+ if (!(file->f_mode & FMODE_READ)) {
+- struct trace_cpu *tc = inode->i_private;
+-
+- trace_array_put(tc->tr);
++ trace_array_put(tr);
+ return 0;
+ }
+
++ /* Writes do not use seq_file */
+ iter = m->private;
+- tr = iter->tr;
+-
+ mutex_lock(&trace_types_lock);
+
+ for_each_tracing_cpu(cpu) {
+@@ -3039,8 +3035,7 @@ static int tracing_single_release_tr(str
+
+ static int tracing_open(struct inode *inode, struct file *file)
+ {
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
++ struct trace_array *tr = inode->i_private;
+ struct trace_iterator *iter;
+ int ret = 0;
+
+@@ -3048,16 +3043,17 @@ static int tracing_open(struct inode *in
+ return -ENODEV;
+
+ /* If this file was open for write, then erase contents */
+- if ((file->f_mode & FMODE_WRITE) &&
+- (file->f_flags & O_TRUNC)) {
+- if (tc->cpu == RING_BUFFER_ALL_CPUS)
++ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
++ int cpu = tracing_get_cpu(inode);
++
++ if (cpu == RING_BUFFER_ALL_CPUS)
+ tracing_reset_online_cpus(&tr->trace_buffer);
+ else
+- tracing_reset(&tr->trace_buffer, tc->cpu);
++ tracing_reset(&tr->trace_buffer, cpu);
+ }
+
+ if (file->f_mode & FMODE_READ) {
+- iter = __tracing_open(tr, tc, inode, file, false);
++ iter = __tracing_open(inode, file, false);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+@@ -4672,8 +4668,7 @@ struct ftrace_buffer_info {
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ static int tracing_snapshot_open(struct inode *inode, struct file *file)
+ {
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
++ struct trace_array *tr = inode->i_private;
+ struct trace_iterator *iter;
+ struct seq_file *m;
+ int ret = 0;
+@@ -4682,7 +4677,7 @@ static int tracing_snapshot_open(struct
+ return -ENODEV;
+
+ if (file->f_mode & FMODE_READ) {
+- iter = __tracing_open(tr, tc, inode, file, true);
++ iter = __tracing_open(inode, file, true);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ } else {
+@@ -4699,8 +4694,8 @@ static int tracing_snapshot_open(struct
+ ret = 0;
+
+ iter->tr = tr;
+- iter->trace_buffer = &tc->tr->max_buffer;
+- iter->cpu_file = tc->cpu;
++ iter->trace_buffer = &tr->max_buffer;
++ iter->cpu_file = tracing_get_cpu(inode);
+ m->private = iter;
+ file->private_data = m;
+ }
+@@ -5517,7 +5512,6 @@ trace_create_cpu_file(const char *name,
+ static void
+ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
+ {
+- struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
+ struct dentry *d_cpu;
+ char cpu_dir[30]; /* 30 characters should be more than enough */
+@@ -5538,7 +5532,7 @@ tracing_init_debugfs_percpu(struct trace
+
+ /* per cpu trace */
+ trace_create_cpu_file("trace", 0644, d_cpu,
+- &data->trace_cpu, cpu, &tracing_fops);
++ tr, cpu, &tracing_fops);
+
+ trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+ tr, cpu, &tracing_buffers_fops);
+@@ -5551,7 +5545,7 @@ tracing_init_debugfs_percpu(struct trace
+
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ trace_create_cpu_file("snapshot", 0644, d_cpu,
+- &data->trace_cpu, cpu, &snapshot_fops);
++ tr, cpu, &snapshot_fops);
+
+ trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+ tr, cpu, &snapshot_raw_fops);
+@@ -6117,7 +6111,7 @@ init_tracer_debugfs(struct trace_array *
+ tr, &tracing_iter_fops);
+
+ trace_create_file("trace", 0644, d_tracer,
+- (void *)&tr->trace_cpu, &tracing_fops);
++ tr, &tracing_fops);
+
+ trace_create_file("trace_pipe", 0444, d_tracer,
+ tr, &tracing_pipe_fops);
+@@ -6138,11 +6132,11 @@ init_tracer_debugfs(struct trace_array *
+ &trace_clock_fops);
+
+ trace_create_file("tracing_on", 0644, d_tracer,
+- tr, &rb_simple_fops);
++ tr, &rb_simple_fops);
+
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ trace_create_file("snapshot", 0644, d_tracer,
+- (void *)&tr->trace_cpu, &snapshot_fops);
++ tr, &snapshot_fops);
+ #endif
+
+ for_each_tracing_cpu(cpu)
diff --git a/queue-3.10/tracing-change-tracing_pipe_fops-to-rely-on-tracing_get_cpu.patch b/queue-3.10/tracing-change-tracing_pipe_fops-to-rely-on-tracing_get_cpu.patch
new file mode 100644
index 00000000000..e9cc5ec9f71
--- /dev/null
+++ b/queue-3.10/tracing-change-tracing_pipe_fops-to-rely-on-tracing_get_cpu.patch
@@ -0,0 +1,78 @@
+From 15544209cb0b5312e5220a9337a1fe61d1a1f2d9 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Tue, 23 Jul 2013 17:25:57 +0200
+Subject: tracing: Change tracing_pipe_fops() to rely on tracing_get_cpu()
+
+From: Oleg Nesterov
+
+commit 15544209cb0b5312e5220a9337a1fe61d1a1f2d9 upstream.
+
+tracing_open_pipe() is racy, the memory inode->i_private points to
+can be already freed.
+
+Change debugfs_create_file("trace_pipe", data) callers to to pass
+"data = tr", tracing_open_pipe() can use tracing_get_cpu().
+
+Link: http://lkml.kernel.org/r/20130723152557.GA23717@redhat.com
+
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/trace.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3950,8 +3950,7 @@ tracing_max_lat_write(struct file *filp,
+
+ static int tracing_open_pipe(struct inode *inode, struct file *filp)
+ {
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
++ struct trace_array *tr = inode->i_private;
+ struct trace_iterator *iter;
+ int ret = 0;
+
+@@ -3997,9 +3996,9 @@ static int tracing_open_pipe(struct inod
+ if (trace_clocks[tr->clock_id].in_ns)
+ iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
+- iter->cpu_file = tc->cpu;
+- iter->tr = tc->tr;
+- iter->trace_buffer = &tc->tr->trace_buffer;
++ iter->tr = tr;
++ iter->trace_buffer = &tr->trace_buffer;
++ iter->cpu_file = tracing_get_cpu(inode);
+ mutex_init(&iter->mutex);
+ filp->private_data = iter;
+
+@@ -4022,8 +4021,7 @@ fail:
+ static int tracing_release_pipe(struct inode *inode, struct file *file)
+ {
+ struct trace_iterator *iter = file->private_data;
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
++ struct trace_array *tr = inode->i_private;
+
+ mutex_lock(&trace_types_lock);
+
+@@ -5563,7 +5561,7 @@ tracing_init_debugfs_percpu(struct trace
+
+ /* per cpu trace_pipe */
+ trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+- &data->trace_cpu, cpu, &tracing_pipe_fops);
++ tr, cpu, &tracing_pipe_fops);
+
+ /* per cpu trace */
+ trace_create_cpu_file("trace", 0644, d_cpu,
+@@ -6149,7 +6147,7 @@ init_tracer_debugfs(struct trace_array *
+ (void *)&tr->trace_cpu, &tracing_fops);
+
+ trace_create_file("trace_pipe", 0444, d_tracer,
+- (void *)&tr->trace_cpu, &tracing_pipe_fops);
++ tr, &tracing_pipe_fops);
+
+ trace_create_file("buffer_size_kb", 0644, d_tracer,
+ (void *)&tr->trace_cpu, &tracing_entries_fops);
diff --git a/queue-3.10/tracing-change-tracing_stats_fops-to-rely-on.patch b/queue-3.10/tracing-change-tracing_stats_fops-to-rely-on.patch
new file mode 100644
index 00000000000..d825c58340f
--- /dev/null
+++ b/queue-3.10/tracing-change-tracing_stats_fops-to-rely-on.patch
@@ -0,0 +1,80 @@
+From 4d3435b8a4c3357695e09c5e7a3bf73a19fca5b0 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Tue, 23 Jul 2013 17:26:03 +0200
+Subject: tracing: Change tracing_stats_fops to rely on
+ tracing_get_cpu()
+
+From: Oleg Nesterov
+
+commit 4d3435b8a4c3357695e09c5e7a3bf73a19fca5b0 upstream.
+
+tracing_open_generic_tc() is racy, the memory inode->i_private
+points to can be already freed.
+
+1. Change one of its users, tracing_stats_fops, to use
+   tracing_*_generic_tr() instead.
+
+2. Change trace_create_cpu_file("stats", data) to pass "data = tr".
+
+3. Change tracing_stats_read() to use tracing_get_cpu().
+
+Link: http://lkml.kernel.org/r/20130723152603.GA23727@redhat.com
+
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/trace.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2973,7 +2973,6 @@ int tracing_open_generic_tr(struct inode
+ filp->private_data = inode->i_private;
+
+ return 0;
+-
+ }
+
+ int tracing_open_generic_tc(struct inode *inode, struct file *filp)
+@@ -5277,14 +5276,14 @@ static ssize_t
+ tracing_stats_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+ {
+- struct trace_cpu *tc = filp->private_data;
+- struct trace_array *tr = tc->tr;
++ struct inode *inode = file_inode(filp);
++ struct trace_array *tr = inode->i_private;
+ struct trace_buffer *trace_buf = &tr->trace_buffer;
++ int cpu = tracing_get_cpu(inode);
+ struct trace_seq *s;
+ unsigned long cnt;
+ unsigned long long t;
+ unsigned long usec_rem;
+- int cpu = tc->cpu;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+@@ -5337,10 +5336,10 @@ tracing_stats_read(struct file *filp, ch
+ }
+
+ static const struct file_operations tracing_stats_fops = {
+- .open = tracing_open_generic_tc,
++ .open = tracing_open_generic_tr,
+ .read = tracing_stats_read,
+ .llseek = generic_file_llseek,
+- .release = tracing_release_generic_tc,
++ .release = tracing_release_generic_tr,
+ };
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+@@ -5570,7 +5569,7 @@ tracing_init_debugfs_percpu(struct trace
+ tr, cpu, &tracing_buffers_fops);
+
+ trace_create_cpu_file("stats", 0444, d_cpu,
+- &data->trace_cpu, cpu, &tracing_stats_fops);
++ tr, cpu, &tracing_stats_fops);
+
+ trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+ &data->trace_cpu, cpu, &tracing_entries_fops);
diff --git a/queue-3.10/tracing-introduce-trace_create_cpu_file-and.patch b/queue-3.10/tracing-introduce-trace_create_cpu_file-and.patch
new file mode 100644
index 00000000000..e1875694d91
--- /dev/null
+++ b/queue-3.10/tracing-introduce-trace_create_cpu_file-and.patch
@@ -0,0 +1,138 @@
+From 649e9c70da6bfbeb563193a35d3424a5aa7c0d38 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Tue, 23 Jul 2013 17:25:54 +0200
+Subject: tracing: Introduce trace_create_cpu_file() and
+ tracing_get_cpu()
+
+From: Oleg Nesterov
+
+commit 649e9c70da6bfbeb563193a35d3424a5aa7c0d38 upstream.
+
+Every "file_operations" used by tracing_init_debugfs_percpu is buggy.
+f_op->open/etc does:
+
+ 1. struct trace_cpu *tc = inode->i_private;
+    struct trace_array *tr = tc->tr;
+
+ 2. trace_array_get(tr) or fail;
+
+ 3. do_something(tc);
+
+But tc (and tr) can be already freed before trace_array_get() is called.
+And it doesn't matter whether this file is per-cpu or it was created by
+init_tracer_debugfs(), free_percpu() or kfree() are equally bad.
+
+Note that even 1. is not safe, the freed memory can be unmapped. But even
+if it was safe trace_array_get() can wrongly succeed if we also race with
+the next new_instance_create() which can re-allocate the same tr, or tc
+was overwritten and ->tr points to the valid tr. In this case 3. uses the
+freed/reused memory.
+
+Add the new trivial helper, trace_create_cpu_file() which simply calls
+trace_create_file() and encodes "cpu" in "struct inode". Another helper,
+tracing_get_cpu() will be used to read cpu_nr-or-RING_BUFFER_ALL_CPUS.
+
+The patch abuses ->i_cdev to encode the number, it is never used unless
+the file is S_ISCHR(). But we could use something else, say, i_bytes or
+even ->d_fsdata. In any case this hack is hidden inside these 2 helpers,
+it would be trivial to change them if needed.
+
+This patch only changes tracing_init_debugfs_percpu() to use the new
+trace_create_cpu_file(), the next patches will change file_operations.
+
+Note: tracing_get_cpu(inode) is always safe but you can't trust the
+result unless trace_array_get() was called, without trace_types_lock
+which acts as a barrier it can wrongly return RING_BUFFER_ALL_CPUS.
+
+Link: http://lkml.kernel.org/r/20130723152554.GA23710@redhat.com
+
+Cc: Al Viro
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/trace.c | 50 ++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 36 insertions(+), 14 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2834,6 +2834,17 @@ static int s_show(struct seq_file *m, vo
+ return 0;
+ }
+
++/*
++ * Should be used after trace_array_get(), trace_types_lock
++ * ensures that i_cdev was already initialized.
++ */
++static inline int tracing_get_cpu(struct inode *inode)
++{
++ if (inode->i_cdev) /* See trace_create_cpu_file() */
++ return (long)inode->i_cdev - 1;
++ return RING_BUFFER_ALL_CPUS;
++}
++
+ static const struct seq_operations tracer_seq_ops = {
+ .start = s_start,
+ .next = s_next,
+@@ -5521,6 +5532,17 @@ static struct dentry *tracing_dentry_per
+ return tr->percpu_dir;
+ }
+
++static struct dentry *
++trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
++ void *data, long cpu, const struct file_operations *fops)
++{
++ struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
++
++ if (ret) /* See tracing_get_cpu() */
++ ret->d_inode->i_cdev = (void *)(cpu + 1);
++ return ret;
++}
++
+ static void
+ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
+ {
+@@ -5540,28 +5562,28 @@ tracing_init_debugfs_percpu(struct trace
+ }
+
+ /* per cpu trace_pipe */
+- trace_create_file("trace_pipe", 0444, d_cpu,
+- (void *)&data->trace_cpu, &tracing_pipe_fops);
++ trace_create_cpu_file("trace_pipe", 0444, d_cpu,
++ &data->trace_cpu, cpu, &tracing_pipe_fops);
+
+ /* per cpu trace */
+- trace_create_file("trace", 0644, d_cpu,
+- (void *)&data->trace_cpu, &tracing_fops);
++ trace_create_cpu_file("trace", 0644, d_cpu,
++ &data->trace_cpu, cpu, &tracing_fops);
+
+- trace_create_file("trace_pipe_raw", 0444, d_cpu,
+- (void *)&data->trace_cpu, &tracing_buffers_fops);
++ trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
++ &data->trace_cpu, cpu, &tracing_buffers_fops);
+
+- trace_create_file("stats", 0444, d_cpu,
+- (void *)&data->trace_cpu, &tracing_stats_fops);
++ trace_create_cpu_file("stats", 0444, d_cpu,
++ &data->trace_cpu, cpu, &tracing_stats_fops);
+
+- trace_create_file("buffer_size_kb", 0444, d_cpu,
+- (void *)&data->trace_cpu, &tracing_entries_fops);
++ trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
++ &data->trace_cpu, cpu, &tracing_entries_fops);
+
+ #ifdef CONFIG_TRACER_SNAPSHOT
+- trace_create_file("snapshot", 0644, d_cpu,
+- (void *)&data->trace_cpu, &snapshot_fops);
++ trace_create_cpu_file("snapshot", 0644, d_cpu,
++ &data->trace_cpu, cpu, &snapshot_fops);
+
+- trace_create_file("snapshot_raw", 0444, d_cpu,
+- (void *)&data->trace_cpu, &snapshot_raw_fops);
++ trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
++ &data->trace_cpu, cpu, &snapshot_raw_fops);
+ #endif
+ }
+
diff --git a/queue-3.10/tracing-turn-event-id-i_private-into-call-event.type.patch b/queue-3.10/tracing-turn-event-id-i_private-into-call-event.type.patch
new file mode 100644
index 00000000000..1e8e4ff2156
--- /dev/null
+++ b/queue-3.10/tracing-turn-event-id-i_private-into-call-event.type.patch
@@ -0,0 +1,91 @@
+From 1a11126bcb7c93c289bf3218fa546fd3b0c0df8b Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Fri, 26 Jul 2013 19:25:32 +0200
+Subject: tracing: Turn event/id->i_private into call->event.type
+
+From: Oleg Nesterov
+
+commit 1a11126bcb7c93c289bf3218fa546fd3b0c0df8b upstream.
+
+event_id_read() is racy, ftrace_event_call can be already freed
+by trace_remove_event_call() callers.
+
+Change event_create_dir() to pass "data = call->event.type", this
+is all event_id_read() needs. ftrace_event_id_fops no longer needs
+tracing_open_generic().
+
+We add the new helper, event_file_data(), to read ->i_private, it
+will have more users.
+
+Note: currently ACCESS_ONCE() and "id != 0" check are not needed,
+but we are going to change event_remove/rmdir to clear ->i_private.
+
+Link: http://lkml.kernel.org/r/20130726172532.GA3605@redhat.com
+
+Reviewed-by: Masami Hiramatsu
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Steven Rostedt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/trace/trace_events.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -407,6 +407,11 @@ static void put_system(struct ftrace_sub
+ mutex_unlock(&event_mutex);
+ }
+
++static void *event_file_data(struct file *filp)
++{
++ return ACCESS_ONCE(file_inode(filp)->i_private);
++}
++
+ /*
+ * Open and update trace_array ref count.
+ * Must have the current trace_array passed to it.
+@@ -960,19 +965,22 @@ static int trace_format_open(struct inod
+ static ssize_t
+ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+ {
+- struct ftrace_event_call *call = filp->private_data;
++ int id = (long)event_file_data(filp);
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
++ if (unlikely(!id))
++ return -ENODEV;
++
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+- trace_seq_printf(s, "%d\n", call->event.type);
++ trace_seq_printf(s, "%d\n", id);
+
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, s->len);
+@@ -1263,7 +1271,6 @@ static const struct file_operations ftra
+ };
+
+ static const struct file_operations ftrace_event_id_fops = {
+- .open = tracing_open_generic,
+ .read = event_id_read,
+ .llseek = default_llseek,
+ };
+@@ -1511,8 +1518,8 @@ event_create_dir(struct dentry *parent,
+
+ #ifdef CONFIG_PERF_EVENTS
+ if (call->event.type && call->class->reg)
+- trace_create_file("id", 0444, file->dir, call,
+- id);
++ trace_create_file("id", 0444, file->dir,
++ (void *)(long)call->event.type, id);
+ #endif
+
+ /*