Commit ba172962 |
1 | From 7b44addab04c6be1ad8799a4d0ccefb9a53dfceb Mon Sep 17 00:00:00 2001 |
2 | From: Douglas Anderson <dianders@chromium.org> | |
3 | Date: Fri, 8 Mar 2019 11:32:04 -0800 | |
4 | Subject: tracing: kdb: Fix ftdump to not sleep | |
5 | ||
6 | [ Upstream commit 31b265b3baaf55f209229888b7ffea523ddab366 ] | |
7 | ||
8 | As reported back in 2016-11 [1], the "ftdump" kdb command triggers a | |
9 | BUG for "sleeping function called from invalid context". | |
10 | ||
11 | kdb's "ftdump" command wants to call ring_buffer_read_prepare() in | |
12 | atomic context. A very simple solution for this is to add allocation | |
13 | flags to ring_buffer_read_prepare() so kdb can call it without | |
14 | triggering the allocation error. This patch does that. | |
15 | ||
16 | Note that in the original email thread about this, it was suggested | |
17 | that perhaps the solution for kdb was to either preallocate the buffer | |
18 | ahead of time or create our own iterator. I'm hoping that this | |
19 | alternative of adding allocation flags to ring_buffer_read_prepare() | |
20 | can be considered since it means I don't need to duplicate more of the | |
21 | core trace code into "trace_kdb.c" (for either creating my own | |
22 | iterator or re-preparing a ring allocator whose memory was already | |
23 | allocated). | |
24 | ||
25 | NOTE: another option for kdb is to actually figure out how to make it | |
26 | reuse the existing ftrace_dump() function and totally eliminate the | |
27 | duplication. This sounds very appealing and actually works (the "sr | |
28 | z" command can be seen to properly dump the ftrace buffer). The | |
29 | downside here is that ftrace_dump() fully consumes the trace buffer. | |
30 | Unless that is changed I'd rather not use it because it means "ftdump | |
31 | | grep xyz" won't be very useful to search the ftrace buffer since it | |
32 | will throw away the whole trace on the first grep. A future patch to | |
33 | dump only the last few lines of the buffer will also be hard to | |
34 | implement. | |
35 | ||
36 | [1] https://lkml.kernel.org/r/20161117191605.GA21459@google.com | |
37 | ||
38 | Link: http://lkml.kernel.org/r/20190308193205.213659-1-dianders@chromium.org | |
39 | ||
40 | Reported-by: Brian Norris <briannorris@chromium.org> | |
41 | Signed-off-by: Douglas Anderson <dianders@chromium.org> | |
42 | Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> | |
43 | Signed-off-by: Sasha Levin <sashal@kernel.org> | |
44 | --- | |
45 | include/linux/ring_buffer.h | 2 +- | |
46 | kernel/trace/ring_buffer.c | 5 +++-- | |
47 | kernel/trace/trace.c | 6 ++++-- | |
48 | kernel/trace/trace_kdb.c | 6 ++++-- | |
49 | 4 files changed, 12 insertions(+), 7 deletions(-) | |
50 | ||
51 | diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h | |
52 | index 0940fda59872..941bfd9b3c89 100644 | |
53 | --- a/include/linux/ring_buffer.h | |
54 | +++ b/include/linux/ring_buffer.h | |
55 | @@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | |
56 | unsigned long *lost_events); | |
57 | ||
58 | struct ring_buffer_iter * | |
59 | -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu); | |
60 | +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags); | |
61 | void ring_buffer_read_prepare_sync(void); | |
62 | void ring_buffer_read_start(struct ring_buffer_iter *iter); | |
63 | void ring_buffer_read_finish(struct ring_buffer_iter *iter); | |
64 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c | |
65 | index 65bd4616220d..34b4c32b0692 100644 | |
66 | --- a/kernel/trace/ring_buffer.c | |
67 | +++ b/kernel/trace/ring_buffer.c | |
68 | @@ -4141,6 +4141,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume); | |
69 | * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer | |
70 | * @buffer: The ring buffer to read from | |
71 | * @cpu: The cpu buffer to iterate over | |
72 | + * @flags: gfp flags to use for memory allocation | |
73 | * | |
74 | * This performs the initial preparations necessary to iterate | |
75 | * through the buffer. Memory is allocated, buffer recording | |
76 | @@ -4158,7 +4159,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume); | |
77 | * This overall must be paired with ring_buffer_read_finish. | |
78 | */ | |
79 | struct ring_buffer_iter * | |
80 | -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) | |
81 | +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags) | |
82 | { | |
83 | struct ring_buffer_per_cpu *cpu_buffer; | |
84 | struct ring_buffer_iter *iter; | |
85 | @@ -4166,7 +4167,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) | |
86 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | |
87 | return NULL; | |
88 | ||
89 | - iter = kmalloc(sizeof(*iter), GFP_KERNEL); | |
90 | + iter = kmalloc(sizeof(*iter), flags); | |
91 | if (!iter) | |
92 | return NULL; | |
93 | ||
94 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c | |
95 | index 1f96b292df31..c65cea71d1ee 100644 | |
96 | --- a/kernel/trace/trace.c | |
97 | +++ b/kernel/trace/trace.c | |
98 | @@ -3903,7 +3903,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) | |
99 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { | |
100 | for_each_tracing_cpu(cpu) { | |
101 | iter->buffer_iter[cpu] = | |
102 | - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); | |
103 | + ring_buffer_read_prepare(iter->trace_buffer->buffer, | |
104 | + cpu, GFP_KERNEL); | |
105 | } | |
106 | ring_buffer_read_prepare_sync(); | |
107 | for_each_tracing_cpu(cpu) { | |
108 | @@ -3913,7 +3914,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) | |
109 | } else { | |
110 | cpu = iter->cpu_file; | |
111 | iter->buffer_iter[cpu] = | |
112 | - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); | |
113 | + ring_buffer_read_prepare(iter->trace_buffer->buffer, | |
114 | + cpu, GFP_KERNEL); | |
115 | ring_buffer_read_prepare_sync(); | |
116 | ring_buffer_read_start(iter->buffer_iter[cpu]); | |
117 | tracing_iter_reset(iter, cpu); | |
118 | diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c | |
119 | index d953c163a079..810d78a8d14c 100644 | |
120 | --- a/kernel/trace/trace_kdb.c | |
121 | +++ b/kernel/trace/trace_kdb.c | |
122 | @@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) | |
123 | if (cpu_file == RING_BUFFER_ALL_CPUS) { | |
124 | for_each_tracing_cpu(cpu) { | |
125 | iter.buffer_iter[cpu] = | |
126 | - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu); | |
127 | + ring_buffer_read_prepare(iter.trace_buffer->buffer, | |
128 | + cpu, GFP_ATOMIC); | |
129 | ring_buffer_read_start(iter.buffer_iter[cpu]); | |
130 | tracing_iter_reset(&iter, cpu); | |
131 | } | |
132 | } else { | |
133 | iter.cpu_file = cpu_file; | |
134 | iter.buffer_iter[cpu_file] = | |
135 | - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file); | |
136 | + ring_buffer_read_prepare(iter.trace_buffer->buffer, | |
137 | + cpu_file, GFP_ATOMIC); | |
138 | ring_buffer_read_start(iter.buffer_iter[cpu_file]); | |
139 | tracing_iter_reset(&iter, cpu_file); | |
140 | } | |
141 | -- | |
142 | 2.19.1 | |
143 |