From 7b44addab04c6be1ad8799a4d0ccefb9a53dfceb Mon Sep 17 00:00:00 2001
From: Douglas Anderson <dianders@chromium.org>
Date: Fri, 8 Mar 2019 11:32:04 -0800
Subject: tracing: kdb: Fix ftdump to not sleep

[ Upstream commit 31b265b3baaf55f209229888b7ffea523ddab366 ]

As reported back in 2016-11 [1], the "ftdump" kdb command triggers a
BUG for "sleeping function called from invalid context".

kdb's "ftdump" command wants to call ring_buffer_read_prepare() in
atomic context. A very simple solution for this is to add allocation
flags to ring_buffer_read_prepare() so kdb can call it without
triggering the allocation error. This patch does that.

Note that in the original email thread about this, it was suggested
that perhaps the solution for kdb was to either preallocate the buffer
ahead of time or create our own iterator. I'm hoping that this
alternative of adding allocation flags to ring_buffer_read_prepare()
can be considered since it means I don't need to duplicate more of the
core trace code into "trace_kdb.c" (for either creating my own
iterator or re-preparing a ring allocator whose memory was already
allocated).

NOTE: another option for kdb is to actually figure out how to make it
reuse the existing ftrace_dump() function and totally eliminate the
duplication. This sounds very appealing and actually works (the "sr
z" command can be seen to properly dump the ftrace buffer). The
downside here is that ftrace_dump() fully consumes the trace buffer.
Unless that is changed I'd rather not use it because it means "ftdump
| grep xyz" won't be very useful to search the ftrace buffer since it
will throw away the whole trace on the first grep. A future patch to
dump only the last few lines of the buffer will also be hard to
implement.

[1] https://lkml.kernel.org/r/20161117191605.GA21459@google.com

Link: http://lkml.kernel.org/r/20190308193205.213659-1-dianders@chromium.org

Reported-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/linux/ring_buffer.h | 2 +-
 kernel/trace/ring_buffer.c  | 5 +++--
 kernel/trace/trace.c        | 6 ++++--
 kernel/trace/trace_kdb.c    | 6 ++++--
 4 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 0940fda59872..941bfd9b3c89 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
 void ring_buffer_read_prepare_sync(void);
 void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 65bd4616220d..34b4c32b0692 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4141,6 +4141,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
+ * @flags: gfp flags to use for memory allocation
  *
  * This performs the initial preparations necessary to iterate
  * through the buffer. Memory is allocated, buffer recording
@@ -4158,7 +4159,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
 * This overall must be paired with ring_buffer_read_finish.
 */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_iter *iter;
@@ -4166,7 +4167,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
-	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	iter = kmalloc(sizeof(*iter), flags);
 	if (!iter)
 		return NULL;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1f96b292df31..c65cea71d1ee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3903,7 +3903,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+				ring_buffer_read_prepare(iter->trace_buffer->buffer,
+							 cpu, GFP_KERNEL);
 		}
 		ring_buffer_read_prepare_sync();
 		for_each_tracing_cpu(cpu) {
@@ -3913,7 +3914,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+			ring_buffer_read_prepare(iter->trace_buffer->buffer,
+						 cpu, GFP_KERNEL);
 		ring_buffer_read_prepare_sync();
 		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index d953c163a079..810d78a8d14c 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	if (cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter.buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+						 cpu, GFP_ATOMIC);
 			ring_buffer_read_start(iter.buffer_iter[cpu]);
 			tracing_iter_reset(&iter, cpu);
 		}
 	} else {
 		iter.cpu_file = cpu_file;
 		iter.buffer_iter[cpu_file] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+						 cpu_file, GFP_ATOMIC);
 		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
 		tracing_iter_reset(&iter, cpu_file);
 	}
-- 
2.19.1

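For context beyond the hunks themselves, a minimal sketch of the calling convention the new gfp_t parameter enables is shown below. It is illustrative only and not part of the patch; the helper names prepare_iter_may_sleep() and prepare_iter_atomic() are hypothetical, and the sketch assumes only the post-patch prototype of ring_buffer_read_prepare() from include/linux/ring_buffer.h above.

/*
 * Illustrative sketch only (not part of the patch). A caller in ordinary
 * process context keeps the old behaviour by passing GFP_KERNEL; an
 * atomic-context caller such as kdb's "ftdump" passes GFP_ATOMIC so the
 * iterator allocation cannot sleep, at the cost of a small chance that
 * the allocation fails and NULL is returned.
 */
#include <linux/ring_buffer.h>
#include <linux/gfp.h>

/* Process context: sleeping allocations are allowed. */
static struct ring_buffer_iter *
prepare_iter_may_sleep(struct ring_buffer *buffer, int cpu)
{
	return ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
}

/* Atomic/debugger context: must not sleep, so use GFP_ATOMIC. */
static struct ring_buffer_iter *
prepare_iter_atomic(struct ring_buffer *buffer, int cpu)
{
	return ring_buffer_read_prepare(buffer, cpu, GFP_ATOMIC);
}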