// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif
#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

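/*
 * Worked example (values assumed, not from this file): with HZ=1000 and
 * the default 21-second rcu_cpu_stall_timeout, the function above returns
 * 21000 jiffies, plus another 5000 jiffies of slack when CONFIG_PROVE_RCU
 * is enabled, so the first stall warning can appear roughly 21 (or 26)
 * seconds into a grace period.
 */
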
/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

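/*
 * Illustrative sketch (not part of this file): a caller choosing between
 * deferred and synchronous freeing might use this as follows, where p,
 * p->rh, and free_cb are the caller's own (hypothetical) structure,
 * rcu_head field, and callback:
 *
 *	if (rcu_gp_might_be_stalled()) {
 *		synchronize_rcu();		// Wait out the stalled GP...
 *		kfree(p);			// ...then free immediately.
 *	} else {
 *		call_rcu(&p->rh, free_cb);	// Normal deferred freeing.
 *	}
 */
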
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

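/*
 * Note that sysrq-initiated suppression uses the distinct value 2 so
 * that rcu_sysrq_end() cannot clear a suppression value of 1 set by
 * the panic notifier below.
 */
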
/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

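/*
 * For example (standard sysctl usage, not specific to this file), this
 * panic behavior can be enabled at runtime with:
 *
 *	echo 1 > /proc/sys/kernel/panic_on_rcu_stall
 */
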
/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

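/*
 * A note on the arithmetic above: under wraparound-safe comparisons such
 * as ULONG_CMP_GE(), jiffies + ULONG_MAX / 2 is the most distant
 * representable future time, so stall checks cannot fire again until a
 * new grace period rewrites ->jiffies_stall.
 */
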
//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

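/*
 * Worked example (values assumed): with HZ=1000 and a 21-second stall
 * timeout, a grace period starting at jiffies value j sets
 * ->jiffies_stall to j + 21000 and ->jiffies_resched to j + 10500, so
 * resched nudging begins about halfway to the first stall warning.
 */
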
/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

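/*
 * The rcu_kick_kthreads flag checked above defaults to off and is set
 * via the rcupdate.rcu_kick_kthreads kernel boot parameter, which is
 * intended for debugging (note the WARN_ONCE() and ftrace dump above)
 * rather than production use.
 */
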
/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

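/*
 * The request side lives in tree.c's forced-quiescent-state path:
 * roughly, it sets ->rcu_iw_pending and then posts the work with
 * irq_work_queue_on(&rdp->rcu_iw, rdp->cpu).  If this handler runs, the
 * target CPU must have enabled interrupts at some point, which lets the
 * stall printout rule out long-term irq disabling on that CPU.
 */
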
//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_node *rnp;
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rnp = t->rcu_blocked_node;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

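/*
 * Example of the resulting output (values are hypothetical): "P12/1:b..l"
 * names PID 12 with rcu_read_lock() nesting depth 1, its blocked flag
 * set (b), need_qs clear (.), exp_hint clear (.), and still on the
 * ->blkd_tasks list (l).  A bare "P13" is a task that was running and
 * therefore was not inspected.
 */
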
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}

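/*
 * Example of the line printed above (values are hypothetical):
 *
 *	3-OoN.: (21 ticks this GP) idle=302/1/0x4000000000000000 softirq=5/5 fqs=0
 *
 * Here CPU 3 is online (O), its bit is set in ->qsmaskinit (o) and in
 * ->qsmaskinitnext (N), its irq_work was handled during this grace
 * period (.), and it has taken 21 scheduling-clock interrupts while
 * aware of the current grace period.
 */
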
/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state(); /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_stall_is_suppressed() && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);
	}
}

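/*
 * To summarize the two cmpxchg() arms above: a CPU whose own ->qsmask
 * bit is still set self-reports via print_cpu_stall(), while any other
 * CPU waits an additional RCU_STALL_RAT_DELAY jiffies to give the
 * stalled CPU a chance to dump its own stack, and only then reports on
 * its behalf via print_other_cpu_stall().
 */
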
//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->state : 0x1ffffL,
		ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
				 READ_ONCE(rnp->gp_seq_needed)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
			(long)data_race(rnp->gp_seq_needed));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
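
/*
 * Usage sketch (assuming this file is built into rcutree): boot with
 * rcutree.sysrq_rcu=1, then trigger the dump with the sysrq-y key
 * chord or with:
 *
 *	echo y > /proc/sysrq-trigger
 */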