// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>
//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;
#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif
#define RCU_STALL_MIGHT_DIV		8
#define RCU_STALL_MIGHT_MIN		(2 * HZ)
/* Limit-check expedited stall timeouts specified at boottime and runtime. */
int rcu_exp_jiffies_till_stall_check(void)
{
	int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
	int exp_stall_delay_delta = 0;
	int till_stall_check;

	// Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
	if (!cpu_stall_timeout)
		cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());

	// Limit check must be consistent with the Kconfig limits for
	// CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
	// The minimum clamped value is "2UL", because at least one full
	// tick has to be guaranteed.
	till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 21UL * HZ);

	if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
		WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));

#ifdef CONFIG_PROVE_RCU
	/* Add extra ~25% out of till_stall_check. */
	exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
#endif

	return till_stall_check + exp_stall_delay_delta;
}
EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);
/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
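/*
 * Illustrative sketch (not part of this file; "cb", "old", and
 * free_foo_cb() are hypothetical) of the allocation decision described
 * in the header comment above: prefer blocking in synchronize_rcu()
 * when the grace period looks stalled, otherwise allocate and use
 * call_rcu() for asynchronous reclamation.
 *
 *	if (!rcu_gp_might_be_stalled() &&
 *	    (cb = kmalloc(sizeof(*cb), GFP_NOWAIT))) {
 *		cb->ptr = old;
 *		call_rcu(&cb->rh, free_foo_cb);	// Deferred free.
 *	} else {
 *		synchronize_rcu();		// Wait for a grace period...
 *		kfree(old);			// ...then free directly.
 *	}
 */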
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}
/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);
/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
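/*
 * Example (illustrative): both knobs above are exposed as sysctls, so on
 * a running system:
 *
 *	# echo 1 > /proc/sys/kernel/panic_on_rcu_stall
 *	# echo 3 > /proc/sys/kernel/max_rcu_stall_to_panic
 *
 * makes the kernel panic on the third detected RCU CPU stall rather than
 * on the first one.
 */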
/**
 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall,
		   jiffies + rcu_jiffies_till_stall_check());
}
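/*
 * Illustrative call sequence (hypothetical caller): because hard irqs
 * must be disabled across the call, a typical use would be:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	rcu_cpu_stall_reset();
 *	local_irq_restore(flags);
 */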
//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}
/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}
/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}
//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU
/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};
/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static int check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return -EBUSY; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return 0;
}
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i];
		if (task_call_func(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}
/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rdp->rcuc_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       falsepositive ? " (false positive?)" : "");
}
static void rcuc_kthread_dump(struct rcu_data *rdp)
{
	int cpu;
	unsigned long j;
	struct task_struct *rcuc;

	rcuc = rdp->rcu_cpu_kthread_task;
	if (!rcuc)
		return;

	cpu = task_cpu(rcuc);
	if (cpu_is_offline(cpu) || idle_cpu(cpu))
		return;

	if (!rcu_is_rcuc_kthread_starving(rdp, &j))
		return;

	pr_err("%s kthread starved for %ld jiffies\n", rcuc->comm, j);
	sched_show_task(rcuc);
	if (!trigger_single_cpu_backtrace(cpu))
		dump_cpu_task(cpu);
}
/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(READ_ONCE(rcu_state.gp_flags)),
		       gp_state_getname(rcu_state.gp_state),
		       data_race(READ_ONCE(rcu_state.gp_state)),
		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
						dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}
/* Complain about missing wakeups from expired fqs wait timer. */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       data_race(READ_ONCE(gpk->__state)));
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(READ_ONCE(jiffies_till_next_fqs)),
			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}
static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	printk_prefer_direct_enter();
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	if (!use_softirq)
		rcuc_kthread_dump(rdp);

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();

	printk_prefer_direct_exit();
}
static void check_cpu_stall(struct rcu_data *rdp)
{
	bool didstall = false;
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + ULONG_MAX / 2;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;
	}

	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
		WRITE_ONCE(rcu_state.jiffies_stall, jn);
	}
}
//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (data_race(READ_ONCE(rnp->qsmask))) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
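/*
 * Illustrative use (hypothetical caller, following the semantics in the
 * header comment above): distinguish a priority-boosting failure from a
 * CPU that never passed through a quiescent state.
 *
 *	int cpu;
 *
 *	if (rcu_check_boost_fail(gp_state, &cpu))
 *		pr_info("RCU priority boosting is to blame\n");
 *	else if (cpu >= 0)
 *		pr_info("CPU %d is blocking the current grace period\n", cpu);
 *	else
 *		pr_info("No CPU blocking and nothing in need of boosting\n");
 */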
/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
	js = j - data_race(READ_ONCE(rcu_state.gp_start));
	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		data_race(READ_ONCE(rcu_state.gp_state)),
		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
		data_race(READ_ONCE(rcu_state.gp_max)),
		data_race(READ_ONCE(rcu_state.gp_flags)));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(READ_ONCE(rnp->gp_seq)),
			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
			data_race(READ_ONCE(rnp->qsmask)),
			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
			data_race(READ_ONCE(rnp->n_boosts)));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}
/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
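/*
 * Illustrative use: with the sysrq_rcu module parameter set at boot, the
 * RCU tree dump can be requested at runtime via the commandeered sysrq
 * key:
 *
 *	# echo y > /proc/sysrq-trigger
 *
 * which invokes sysrq_show_rcu() and thus show_rcu_gp_kthreads().
 */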