// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef data_race
#define data_race(expr)						\
	({							\
		expr;						\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif

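/*
 * Illustrative sketch (the variable name is hypothetical): a diagnostic
 * read that intentionally races with updaters can be annotated as
 *
 *	pr_info("count: %d\n", data_race(racy_count));
 *
 * so that KCSAN, when available, does not flag the access; the fallback
 * definitions above turn these annotations into no-ops otherwise.
 */
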
#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret: Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (ie:
 * that we are in the section between rcu_idle_enter() and rcu_idle_exit())
 * then rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way, other CPUs that started a
 * grace period can notice the extended quiescent state.  Otherwise we would
 * delay any grace period for as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
	if (!debug_lockdep_rcu_enabled()) {
		*ret = true;
		return true;
	}
	if (!rcu_is_watching()) {
		*ret = false;
		return true;
	}
	if (!rcu_lockdep_current_cpu_online()) {
		*ret = false;
		return true;
	}
	return false;
}

int rcu_read_lock_sched_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

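/*
 * Illustrative use of the predicate above: a function that must run within
 * an RCU-sched reader (or with preemption disabled) can assert that fact
 * under lockdep, for example:
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *			 "expected rcu_read_lock_sched() protection");
 */
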
#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the boot-time period from when the
 * first task is spawned until the rcu_set_runtime_mode() core_initcall()
 * is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);

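/*
 * Illustrative pairing of the two functions above: a latency-sensitive
 * phase can temporarily force expedited grace periods, then undo the
 * request once it no longer matters.
 *
 *	rcu_expedite_gp();
 *	synchronize_rcu();	// behaves like synchronize_rcu_expedited()
 *	rcu_unexpedite_gp();
 *
 * The nesting count means each rcu_expedite_gp() must be balanced by a
 * matching rcu_unexpedite_gp().
 */
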
static bool rcu_boot_ended __read_mostly;

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* XXX PREEMPT_RCU ? */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_LOCK also makes BH preemptible */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

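/*
 * Illustrative sketch (names are hypothetical): this predicate is commonly
 * used in lockdep-checked dereferences, for example
 *
 *	p = rcu_dereference_check(gp, rcu_read_lock_held() ||
 *				      lockdep_is_held(&my_update_lock));
 *
 * which complains only if neither an RCU reader nor the update-side lock
 * protects the access.
 */
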
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
		return 1;
	return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

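/*
 * Rough sketch of the intended use: a waiter embeds an rcu_head and a
 * completion in a struct rcu_synchronize, registers this callback, and
 * then blocks until a grace period has elapsed.
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 *
 * __wait_rcu_gp() below follows exactly this pattern, adding the on-stack
 * debug-objects bookkeeping.
 */
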
void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

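/*
 * Callers normally reach __wait_rcu_gp() through the wait_rcu_gp() family
 * of wrappers, which supply the crcu_array[] and rs_array[] arguments.  A
 * direct (purely illustrative) call waiting for one flavor might look like:
 *
 *	struct rcu_synchronize rs;
 *	call_rcu_func_t crf = call_rcu;
 *
 *	__wait_rcu_gp(false, 1, &crf, &rs);
 *
 * The duplicate checks in the loops above ensure that passing the same
 * callback-registration function twice waits for only one grace period.
 */
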
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be destroyed
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
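
/*
 * The on-stack helpers above pair up around the lifetime of a stack-allocated
 * rcu_head, as __wait_rcu_gp() demonstrates:
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	...			// register rs.head with call_rcu() or a friend
 *	...			// wait until the callback is known to have run
 *	destroy_rcu_head_on_stack(&rs.head);
 *
 * Both calls compile to nothing unless CONFIG_DEBUG_OBJECTS_RCU_HEAD is set.
 */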

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

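/*
 * Because of the MODULE_PARAM_PREFIX defined near the top of this file,
 * these knobs appear on the kernel command line with an "rcupdate." prefix,
 * for example (values are illustrative only):
 *
 *	rcupdate.rcu_cpu_stall_suppress=1
 *	rcupdate.rcu_cpu_stall_timeout=60
 */
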
#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);

struct early_boot_kfree_rcu {
	struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU))
		call_srcu(&early_srcu, &shead, test_callback);
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */