git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - queue-4.4/tracing-kdb-fix-ftdump-to-not-sleep.patch
Fixes for 4.19
[thirdparty/kernel/stable-queue.git] / queue-4.4 / tracing-kdb-fix-ftdump-to-not-sleep.patch
1 From 140a0e71c7c6b557979afe38a4535f959edcc299 Mon Sep 17 00:00:00 2001
2 From: Douglas Anderson <dianders@chromium.org>
3 Date: Fri, 8 Mar 2019 11:32:04 -0800
4 Subject: tracing: kdb: Fix ftdump to not sleep
5
6 [ Upstream commit 31b265b3baaf55f209229888b7ffea523ddab366 ]
7
8 As reported back in 2016-11 [1], the "ftdump" kdb command triggers a
9 BUG for "sleeping function called from invalid context".
10
11 kdb's "ftdump" command wants to call ring_buffer_read_prepare() in
12 atomic context. A very simple solution for this is to add allocation
13 flags to ring_buffer_read_prepare() so kdb can call it without
14 triggering the allocation error. This patch does that.
15
16 Note that in the original email thread about this, it was suggested
17 that perhaps the solution for kdb was to either preallocate the buffer
18 ahead of time or create our own iterator. I'm hoping that this
19 alternative of adding allocation flags to ring_buffer_read_prepare()
20 can be considered since it means I don't need to duplicate more of the
21 core trace code into "trace_kdb.c" (for either creating my own
22 iterator or re-preparing a ring allocator whose memory was already
23 allocated).
24
25 NOTE: another option for kdb is to actually figure out how to make it
26 reuse the existing ftrace_dump() function and totally eliminate the
27 duplication. This sounds very appealing and actually works (the "sr
28 z" command can be seen to properly dump the ftrace buffer). The
29 downside here is that ftrace_dump() fully consumes the trace buffer.
30 Unless that is changed I'd rather not use it because it means "ftdump
31 | grep xyz" won't be very useful to search the ftrace buffer since it
32 will throw away the whole trace on the first grep. A future patch to
33 dump only the last few lines of the buffer will also be hard to
34 implement.
35
36 [1] https://lkml.kernel.org/r/20161117191605.GA21459@google.com
37
38 Link: http://lkml.kernel.org/r/20190308193205.213659-1-dianders@chromium.org
39
40 Reported-by: Brian Norris <briannorris@chromium.org>
41 Signed-off-by: Douglas Anderson <dianders@chromium.org>
42 Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
43 Signed-off-by: Sasha Levin <sashal@kernel.org>
44 ---
45 include/linux/ring_buffer.h | 2 +-
46 kernel/trace/ring_buffer.c | 5 +++--
47 kernel/trace/trace.c | 6 ++++--
48 kernel/trace/trace_kdb.c | 6 ++++--
49 4 files changed, 12 insertions(+), 7 deletions(-)
50
51 diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
52 index 19d0778ec382..121c8f99ecdd 100644
53 --- a/include/linux/ring_buffer.h
54 +++ b/include/linux/ring_buffer.h
55 @@ -125,7 +125,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
56 unsigned long *lost_events);
57
58 struct ring_buffer_iter *
59 -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
60 +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
61 void ring_buffer_read_prepare_sync(void);
62 void ring_buffer_read_start(struct ring_buffer_iter *iter);
63 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
64 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
65 index 74b20e3ab8c6..5e091614fe39 100644
66 --- a/kernel/trace/ring_buffer.c
67 +++ b/kernel/trace/ring_buffer.c
68 @@ -4042,6 +4042,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
69 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
70 * @buffer: The ring buffer to read from
71 * @cpu: The cpu buffer to iterate over
72 + * @flags: gfp flags to use for memory allocation
73 *
74 * This performs the initial preparations necessary to iterate
75 * through the buffer. Memory is allocated, buffer recording
76 @@ -4059,7 +4060,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
77 * This overall must be paired with ring_buffer_read_finish.
78 */
79 struct ring_buffer_iter *
80 -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
81 +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
82 {
83 struct ring_buffer_per_cpu *cpu_buffer;
84 struct ring_buffer_iter *iter;
85 @@ -4067,7 +4068,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
86 if (!cpumask_test_cpu(cpu, buffer->cpumask))
87 return NULL;
88
89 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
90 + iter = kmalloc(sizeof(*iter), flags);
91 if (!iter)
92 return NULL;
93
94 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95 index 8c097de8a596..ae00e68ceae3 100644
96 --- a/kernel/trace/trace.c
97 +++ b/kernel/trace/trace.c
98 @@ -3122,7 +3122,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
99 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
100 for_each_tracing_cpu(cpu) {
101 iter->buffer_iter[cpu] =
102 - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
103 + ring_buffer_read_prepare(iter->trace_buffer->buffer,
104 + cpu, GFP_KERNEL);
105 }
106 ring_buffer_read_prepare_sync();
107 for_each_tracing_cpu(cpu) {
108 @@ -3132,7 +3133,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
109 } else {
110 cpu = iter->cpu_file;
111 iter->buffer_iter[cpu] =
112 - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
113 + ring_buffer_read_prepare(iter->trace_buffer->buffer,
114 + cpu, GFP_KERNEL);
115 ring_buffer_read_prepare_sync();
116 ring_buffer_read_start(iter->buffer_iter[cpu]);
117 tracing_iter_reset(iter, cpu);
118 diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
119 index 57149bce6aad..896458285fdd 100644
120 --- a/kernel/trace/trace_kdb.c
121 +++ b/kernel/trace/trace_kdb.c
122 @@ -50,14 +50,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
123 if (cpu_file == RING_BUFFER_ALL_CPUS) {
124 for_each_tracing_cpu(cpu) {
125 iter.buffer_iter[cpu] =
126 - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
127 + ring_buffer_read_prepare(iter.trace_buffer->buffer,
128 + cpu, GFP_ATOMIC);
129 ring_buffer_read_start(iter.buffer_iter[cpu]);
130 tracing_iter_reset(&iter, cpu);
131 }
132 } else {
133 iter.cpu_file = cpu_file;
134 iter.buffer_iter[cpu_file] =
135 - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
136 + ring_buffer_read_prepare(iter.trace_buffer->buffer,
137 + cpu_file, GFP_ATOMIC);
138 ring_buffer_read_start(iter.buffer_iter[cpu_file]);
139 tracing_iter_reset(&iter, cpu_file);
140 }
141 --
142 2.19.1
143