// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
#ifndef data_race
#define data_race(expr)							\
	({								\
		expr;							\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif
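
/*
 * Note: the three fallbacks above are no-ops used only when the kernel's
 * headers do not yet supply the KCSAN data_race()/assertion macros, so
 * that this file still builds on such kernels.
 */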

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
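
/*
 * Layout of the reader-state word used below: the low RCUTORTURE_RDR_SHIFT
 * bits hold the RCUTORTURE_RDR_* protection flags, and the bits above the
 * shift hold the index returned by ->readlock() (needed only for SRCU).
 * For example, a reader holding srcu_read_lock() index 1 with bh disabled
 * is encoded as (1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_BH.
 */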

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
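
/*
 * The per-CPU rcu_torture_count[] and rcu_torture_batch[] arrays implement
 * the "pipeline" check: each element of rcu_tortures[] ages by one slot per
 * grace period after being replaced, and readers bin each access by the age
 * they observed.  Any read landing in a slot greater than 1 means a reader
 * saw an element survive more than one grace period, i.e. a too-short GP.
 */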
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
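
/*
 * Each torture "flavor" below fills in this operations vector with its own
 * primitives.  A NULL entry means the flavor does not support the
 * corresponding operation, and callers are expected to check for NULL
 * (as in "if (cur_ops->call)") before invoking it.
 */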

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
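
/*
 * The ->inflight flag is written with smp_store_release() here and in the
 * boost kthread, and read with smp_load_acquire(), so an observed zero
 * guarantees that the callback's RCU-core accesses have completed before
 * the kthread posts the next callback.
 */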

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in; otherwise, the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
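
/*
 * In other words, a boost-test callback must run within test_boost_duration
 * seconds, less a half-second of slack: with test_boost_duration=4 and
 * HZ=1000, anything over 3500 jiffies from posting to invocation counts as
 * a priority-boost failure.
 */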

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track if the test failed already in this test interval? */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (time_before(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * this case the boost check would never happen in the above
		 * loop so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}
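
	/*
	 * Each pass through the following loop installs a new rcu_torture
	 * element as rcu_torture_current and retires the old one through a
	 * randomly chosen entry of the synctype[] array built above.
	 */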
	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    rcu_inkernel_boot_has_ended())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
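
/*
 * Note on the expediting counter in rcu_torture_writer(): values 1..3
 * correspond to that many nested rcu_expedite_gp() calls, after which the
 * counter is negated and the same number of rcu_unexpedite_gp() calls
 * unwind the nesting, exercising the expediting nesting logic in both
 * directions.
 */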

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning or end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU) {
		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);

		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}
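
/*
 * Example of the constraint above: if the new mask disables interrupts but
 * would drop a bh-style protection that the old mask held, both bh bits are
 * forced on, because enabling bh with interrupts disabled is illegal.
 */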

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
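
/*
 * About the bias above: OR-ing the random value with a copy of itself
 * shifted right by three sets more of the low-order bits than a plain
 * random draw would, so after masking with RCUTORTURE_RDR_MAX_LOOPS (0x7)
 * and adding one, the loop count skews toward its maximum of 8.
 */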

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  rcu_read_lock_trace_held() ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rtcp,
		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		data_race(n_barrier_successes),
		data_race(n_barrier_attempts),
		data_race(n_rcu_torture_barrier_error));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
		WARN_ON_ONCE(i > 1); // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_access_pointer(rcu_torture_current) &&
	    !rcu_stall_is_suppressed()) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "stall_cpu_block=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 stall_cpu_block,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	int idx;
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop() && stall_gp_kthread > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
			if (kthread_should_stop())
				break;
			schedule_timeout_uninterruptible(HZ);
		}
	}
	if (!kthread_should_stop() && stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		idx = cur_ops->readlock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else if (!stall_cpu_block)
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 raw_smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			if (stall_cpu_block)
				schedule_timeout_uninterruptible(HZ);
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else if (!stall_cpu_block)
			preempt_enable();
		cur_ops->readunlock(idx);
	}
	pr_alert("rcu_torture_stall end.\n");
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	struct rcu_fwd *rfc_rfp;
	int rfc_gps;
};

#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
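
/*
 * Bucket-count arithmetic: with MAX_FWD_CB_JIFFIES = 8 * HZ and
 * FWD_CBS_HIST_DIV = 10 buckets/second, N_LAUNDERS_HIST works out to
 * 2 * 8 * HZ / (HZ / 10) = 160 buckets, enough to cover twice the
 * maximum callback-flood test duration at 100 ms per bucket.
 */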

struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};

struct rcu_fwd {
	spinlock_t rcu_fwd_lock;
	struct rcu_fwd_cb *rcu_fwd_cb_head;
	struct rcu_fwd_cb **rcu_fwd_cb_tail;
	long n_launders_cb;
	unsigned long rcu_fwd_startat;
	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
	unsigned long rcu_launder_gp_seq_start;
};

static struct rcu_fwd *rcu_fwds;
static bool rcu_fwd_emergency_stop;

static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
		if (rfp->n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rfp->rcu_fwd_startat);
	gps_old = rfp->rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = rfp->n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV,
			rfp->n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;
	struct rcu_fwd *rfp = rfcp->rfc_rfp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
	rfcpp = rfp->rcu_fwd_cb_tail;
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
	rfp->n_launders_hist[i].n_launders++;
	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
}
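
/*
 * Each invocation of the callback above counts as one "launder": the
 * callback puts its element back on the tail of the list so that the
 * flood loop can re-post it, bumps the element's ->rfc_gps laundering
 * count, and credits the histogram bucket for the current
 * 1/FWD_CBS_HIST_DIV-second interval since ->rcu_fwd_startat.
 */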

// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			udelay(10);
	} else {
		// No userspace emulation: CB invocation throttles call_rcu()
		cond_resched();
	}
}
1865 // No userspace emulation: CB invocation throttles call_rcu()
1870 * Free all callbacks on the rcu_fwd_cb_head list, either because the
1871 * test is over or because we hit an OOM event.
1873 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd
*rfp
)
1875 unsigned long flags
;
1876 unsigned long freed
= 0;
1877 struct rcu_fwd_cb
*rfcp
;
1880 spin_lock_irqsave(&rfp
->rcu_fwd_lock
, flags
);
1881 rfcp
= rfp
->rcu_fwd_cb_head
;
1883 spin_unlock_irqrestore(&rfp
->rcu_fwd_lock
, flags
);
1886 rfp
->rcu_fwd_cb_head
= rfcp
->rfc_next
;
1887 if (!rfp
->rcu_fwd_cb_head
)
1888 rfp
->rcu_fwd_cb_tail
= &rfp
->rcu_fwd_cb_head
;
1889 spin_unlock_irqrestore(&rfp
->rcu_fwd_lock
, flags
);
1892 rcu_torture_fwd_prog_cond_resched(freed
);
1893 if (tick_nohz_full_enabled()) {
1894 local_irq_save(flags
);
1895 rcu_momentary_dyntick_idle();
1896 local_irq_restore(flags
);

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
				    int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
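
/*
 * Duration arithmetic sketch (assuming the default 21-second RCU CPU
 * stall timeout, HZ=1000, and fwd_progress_div=4): sd = 21001,
 * sd4 = (21001 + 3) / 4 = 5251, so dur is drawn uniformly from
 * [5251, 21000] jiffies.  The tight loop above therefore runs for at
 * least 1/fwd_progress_div of a stall-warning interval, but never
 * long enough to be guaranteed to trigger one.
 */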

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist(rfp);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
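
/*
 * Success-criterion sketch: a callback is "laundered" each time it is
 * invoked and then re-posted, with ->rfc_gps counting those trips.
 * The flood loop above exits early once MIN_FWD_CBS_LAUNDERED (100)
 * head callbacks have each seen at least MIN_FWD_CB_LAUNDERS (3)
 * invocations, that is, once grace periods have demonstrably kept
 * pace with the flood.  Otherwise the loop runs the full
 * MAX_FWD_CB_JIFFIES and the WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED)
 * above complains about the shortfall.
 */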

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	struct rcu_fwd *rfp = rcu_fwds;

	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist(rfp);
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}
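
/*
 * Notifier-protocol note: the OOM killer passes a pointer to an
 * amount-freed counter as the notifier-chain data argument, and it
 * stands down for the current round if any notifier reports having
 * reclaimed memory.  Incrementing *nfreed above therefore both
 * records the freed callbacks and asks the OOM killer to hold off.
 */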

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
		    rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
		if (rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_cr(rfp);
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
	if (!rfp)
		return -ENOMEM;
	spin_lock_init(&rfp->rcu_fwd_lock);
	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
	rcu_fwds = rfp; /* Publish for rcutorture_oom_notify(). */
	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static void rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
					     &rcu, 1))
			// IPI failed, so use direct call from current CPU.
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
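
/*
 * Ordering sketch: the driver in rcu_torture_barrier() below publishes
 * each new phase with smp_store_release(&barrier_phase, ...), and each
 * callback kthread picks it up via the smp_load_acquire() above.  The
 * release/acquire pair guarantees that everything the driver wrote
 * before flipping the phase (the reset barrier_cbs_invoked and
 * barrier_cbs_count values) is visible here before ->call() posts this
 * kthread's callback.
 */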

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
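
/*
 * Test logic in brief: each pass posts exactly n_barrier_cbs
 * callbacks, one per kthread, then invokes cur_ops->cb_barrier().  A
 * conforming barrier implementation cannot return until all of those
 * callbacks have run, so barrier_cbs_invoked must equal n_barrier_cbs
 * afterward.  Any shortfall means the barrier returned too soon, and
 * the recovery loop above then distinguishes "returned too soon" from
 * "callback lost forever".
 */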

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	show_rcu_gp_kthreads();
	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
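
/*
 * Rate-limiting sketch: (++n & 0xfff) is zero once every 4096 calls,
 * so this helper issues at most one synchronous grace period per 4096
 * invocations.  It is handed to torture_onoff_init() below as the
 * inter-operation sync, which presumably keeps CPU-hotplug torture
 * from being fully serialized behind grace-period waits.
 */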

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
		&tasks_tracing_ops, &trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {
		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}
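
/*
 * Usage sketch (hypothetical parameter values): the test is typically
 * started by loading this module with the desired flavor and knobs,
 * for example:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 fwd_progress=0
 *
 * and stopped with "rmmod rcutorture", at which point
 * rcu_torture_cleanup() prints the end-of-test SUCCESS/FAILURE verdict.
 */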

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);