// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * There are situations when we want to make sure that all buffers
 * have been handled, or when IRQs are blocked.
 */
static int printk_safe_irq_ready __read_mostly;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -     \
                                sizeof(atomic_t) -                      \
                                sizeof(atomic_t) -                      \
                                sizeof(struct irq_work))
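
/*
 * Sizing example (illustrative, assuming the default
 * CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT of 13): SAFE_LOG_BUF_LEN is
 * 8192 - 2 * sizeof(atomic_t) - sizeof(struct irq_work) bytes, so that
 * one struct printk_safe_seq_buf below fits exactly into 8 KiB per CPU.
 */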

struct printk_safe_seq_buf {
        atomic_t        len;            /* length of written data */
        atomic_t        message_lost;
        struct irq_work work;           /* IRQ work that flushes the buffer */
        unsigned char   buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
        if (printk_safe_irq_ready)
                irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and printk-safe
 * contexts have dedicated buffers, because otherwise printk-safe, preempted
 * by an NMI-printk, would overwrite the NMI messages.
 *
 * The messages are flushed from IRQ work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should this
 * happen, printk_safe_log_store() will notice the buffer->len mismatch
 * and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
                                                const char *fmt, va_list args)
{
        int add;
        size_t len;
        va_list ap;

again:
        len = atomic_read(&s->len);

        /* The trailing '\0' is not counted in len. */
        if (len >= sizeof(s->buffer) - 1) {
                atomic_inc(&s->message_lost);
                queue_flush_work(s);
                return 0;
        }

        /*
         * Make sure that all old data have been read before the buffer
         * was reset. This is not needed when we just append data.
         */
        if (!len)
                smp_rmb();

        va_copy(ap, args);
        add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
        va_end(ap);
        if (!add)
                return 0;

        /*
         * Do it once again if the buffer has been flushed in the meantime.
         * Note that atomic_cmpxchg() is an implicit memory barrier that
         * makes sure that the data were written before updating s->len.
         */
        if (atomic_cmpxchg(&s->len, len, len + add) != len)
                goto again;

        queue_flush_work(s);
        return add;
}
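
/*
 * Illustrative interleaving (assumed scenario, not from the original
 * source): CPU A stores a message while CPU B flushes the same buffer.
 *
 *      CPU A                                   CPU B
 *      len = atomic_read(&s->len);  // 10
 *                                              atomic_cmpxchg(&s->len, 10, 0)
 *                                              // buffer flushed, len == 0
 *      vscnprintf(s->buffer + 10, ...);
 *      atomic_cmpxchg(&s->len, 10, 10 + add)
 *      // fails: len is 0, not 10 => goto again
 *
 * The store is retried from the new offset, so the message is neither
 * lost nor interleaved mid-string.
 */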

static inline void printk_safe_flush_line(const char *text, int len)
{
        /*
         * Avoid any console driver calls from here, because we may be
         * in NMI or printk_safe context (when in panic). The messages
         * must go only into the ring buffer at this stage. Consoles will
         * get explicitly called later when a crashdump is not generated.
         */
        printk_deferred("%.*s", len, text);
}

/* printk part of the temporary buffer line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
        const char *c, *end;
        bool header;

        c = start;
        end = start + len;
        header = true;

        /* Print line by line. */
        while (c < end) {
                if (*c == '\n') {
                        printk_safe_flush_line(start, c - start + 1);
                        start = ++c;
                        header = true;
                        continue;
                }

                /* Handle continuation lines or a missing newline. */
                if ((c + 1 < end) && printk_get_level(c)) {
                        if (header) {
                                c = printk_skip_level(c);
                                continue;
                        }

                        printk_safe_flush_line(start, c - start);
                        start = c++;
                        header = true;
                        continue;
                }

                header = false;
                c++;
        }

        /* Check if there was a partial line. Ignore a pure header. */
        if (start < end && !header) {
                static const char newline[] = KERN_CONT "\n";

                printk_safe_flush_line(start, end - start);
                printk_safe_flush_line(newline, strlen(newline));
        }

        return len;
}
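
/*
 * Example (illustrative only): a buffer holding "\0014Hello\n\0014World"
 * is flushed as two lines: "\0014Hello\n" as soon as the '\n' is seen,
 * then "\0014World" followed by an extra KERN_CONT "\n" appended by the
 * partial-line handling above, because the last message had no newline.
 */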

static void report_message_lost(struct printk_safe_seq_buf *s)
{
        int lost = atomic_xchg(&s->message_lost, 0);

        if (lost)
                printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
        static raw_spinlock_t read_lock =
                __RAW_SPIN_LOCK_INITIALIZER(read_lock);
        struct printk_safe_seq_buf *s =
                container_of(work, struct printk_safe_seq_buf, work);
        unsigned long flags;
        size_t len;
        int i;

        /*
         * The lock has two functions. First, one reader has to flush all
         * available messages to make the lockless synchronization with
         * writers easier. Second, we do not want to mix messages from
         * different CPUs. This is especially important when printing
         * a backtrace.
         */
        raw_spin_lock_irqsave(&read_lock, flags);

        i = 0;
more:
        len = atomic_read(&s->len);

        /*
         * This is just a paranoid check that nobody has manipulated
         * the buffer in an unexpected way. If we printed something then
         * @len must only increase. Also it should never overflow the
         * buffer size.
         */
        if ((i && i >= len) || len > sizeof(s->buffer)) {
                const char *msg = "printk_safe_flush: internal error\n";

                printk_safe_flush_line(msg, strlen(msg));
                len = 0;
        }

        if (!len)
                goto out; /* Someone else has already flushed the buffer. */

        /* Make sure that data have been written up to @len. */
        smp_rmb();
        i += printk_safe_flush_buffer(s->buffer + i, len - i);

        /*
         * Check that nothing has got added in the meantime and truncate
         * the buffer. Note that atomic_cmpxchg() is an implicit memory
         * barrier that makes sure that the data were copied before
         * updating s->len.
         */
        if (atomic_cmpxchg(&s->len, len, 0) != len)
                goto more;

out:
        report_message_lost(s);
        raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu nmi and printk-safe buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
                __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
                __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
        }
}
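
/*
 * Usage sketch (for illustration): one caller is the NMI backtrace code
 * (nmi_trigger_cpumask_backtrace() in lib/nmi_backtrace.c). After the
 * remote CPUs have written their backtraces, it calls printk_safe_flush()
 * so that buffers stuck on CPUs with IRQs disabled, whose IRQ work cannot
 * run, still reach the main log.
 */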

/**
 * printk_safe_flush_on_panic - flush all per-cpu buffers when the system
 *      goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
        /*
         * Make sure that we can access the main ring buffer.
         * Do not risk a double release when more CPUs are up.
         */
        if (raw_spin_is_locked(&logbuf_lock)) {
                if (num_online_cpus() > 1)
                        return;

                debug_locks_off();
                raw_spin_lock_init(&logbuf_lock);
        }

        printk_safe_flush();
}
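
/*
 * Usage sketch (simplified, based on kernel/panic.c): panic() calls this
 * helper before dumping kmsg, so that per-CPU messages survive even when
 * the crashed CPU still held logbuf_lock:
 *
 *      void panic(const char *fmt, ...)
 *      {
 *              ...
 *              printk_safe_flush_on_panic();
 *              kmsg_dump(KMSG_DUMP_PANIC);
 *              ...
 *      }
 */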

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
        struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

        return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}

/*
 * Marks code that might produce many messages in NMI context
 * and where the risk of losing them is more critical than a possible
 * reordering.
 *
 * It has an effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
        if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
                this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
        this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
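
/*
 * Usage sketch (for illustration): ftrace_dump() in kernel/trace/trace.c
 * wraps its long burst of trace-line printks in this mode, preferring a
 * direct store into logbuf over the small per-CPU buffer:
 *
 *      printk_nmi_direct_enter();
 *      // ... print many trace lines ...
 *      printk_nmi_direct_exit();
 */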

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
        return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
        struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

        return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
        this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
        this_cpu_dec(printk_context);
}
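
/*
 * Usage sketch (simplified, based on the helpers in internal.h): sections
 * of printk code that take logbuf_lock are wrapped so that any recursive
 * printk() is redirected into the per-CPU buffer instead of deadlocking:
 *
 *      printk_safe_enter_irqsave(flags); // local_irq_save + __printk_safe_enter
 *      raw_spin_lock(&logbuf_lock);
 *      // ... any printk() here goes through vprintk_safe() ...
 *      raw_spin_unlock(&logbuf_lock);
 *      printk_safe_exit_irqrestore(flags);
 */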

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
        /*
         * Try to use the main logbuf even in NMI. But avoid calling console
         * drivers that might have their own locks.
         */
        if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
            raw_spin_trylock(&logbuf_lock)) {
                int len;

                len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
                raw_spin_unlock(&logbuf_lock);
                defer_console_output();
                return len;
        }

        /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
        if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
                return vprintk_nmi(fmt, args);

        /* Use extra buffer to prevent a recursion deadlock in safe mode. */
        if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
                return vprintk_safe(fmt, args);

        /* No obstacles. */
        return vprintk_default(fmt, args);
}
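
/*
 * Every printk() ends up here: the printk() wrapper in printk.c simply
 * forwards its va_list to vprintk_func() (simplified sketch):
 *
 *      asmlinkage __visible int printk(const char *fmt, ...)
 *      {
 *              va_list args;
 *              int r;
 *
 *              va_start(args, fmt);
 *              r = vprintk_func(fmt, args);
 *              va_end(args);
 *
 *              return r;
 *      }
 */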

void __init printk_safe_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct printk_safe_seq_buf *s;

                s = &per_cpu(safe_print_seq, cpu);
                init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
                s = &per_cpu(nmi_print_seq, cpu);
                init_irq_work(&s->work, __printk_safe_flush);
#endif
        }

        /*
         * Make sure that the IRQ works are set up before this variable is
         * set, in the highly unlikely event that an NMI triggers at this
         * moment.
         */
        barrier();
        printk_safe_irq_ready = 1;

        /* Flush pending messages for which no IRQ work could be scheduled. */
        printk_safe_flush();
}