// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
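
/*
 * The value returned above is in jiffies:  with the Kconfig default of
 * CONFIG_RCU_CPU_STALL_TIMEOUT=21 (as of this writing), the timeout is
 * 21 * HZ jiffies, that is, 21 seconds.  PROVE_RCU kernels get an extra
 * five seconds of slack to allow for debug-checking overhead.
 */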

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}
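
/*
 * The value 2 above marks the suppression as sysrq-initiated, which
 * lets rcu_sysrq_end() clear only what rcu_sysrq_start() set, leaving
 * an explicitly requested suppression (value 1, for example from the
 * rcupdate.rcu_cpu_stall_suppress boot parameter) in force.
 */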

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
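
/*
 * jiffies + ULONG_MAX / 2 is the farthest future time that the
 * wraparound-safe ULONG_CMP_GE() comparisons applied to ->jiffies_stall
 * can still distinguish from the past, making it an effectively
 * infinite timeout.
 */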

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	/* Record ->gp_start before ->jiffies_stall. */
	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
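
/*
 * The release store above pairs with the smp_rmb() calls in
 * check_cpu_stall():  a reader that sees the new ->jiffies_stall is
 * then guaranteed to also see the new ->gp_start, which keeps the
 * stall detector's cross-checks consistent.
 */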

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}
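
/*
 * The ->rcu_iw_gp_seq and ->rcu_iw_pending state recorded above is
 * consumed by print_cpu_stall_info():  a digit in its per-CPU flags
 * field gives the number of grace periods for which this irq_work has
 * remained pending, implicating an over-long irq-disabled stretch on
 * the CPU in question.
 */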

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
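
/*
 * The ->prev/list_for_each_entry_continue() idiom above starts the
 * scan at rnp->gp_tasks itself:  entries nearer the head of
 * ->blkd_tasks are newer and block only later grace periods, so only
 * ->gp_tasks and the entries following it are of interest here.
 */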

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
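
/*
 * Only CPUs whose bits are still set in ->qsmask have their stacks
 * dumped above:  those are the CPUs that have yet to report a
 * quiescent state for the current grace period, and thus the prime
 * suspects.
 */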

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

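/*
 * Names for the grace-period kthread's ->gp_state values, for use in
 * the diagnostics below.  This table must be kept in sync with the
 * RCU_GP_* definitions:  gp_state_getname() maps out-of-range values
 * to "???".
 */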
static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz);
}
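
/*
 * Decoding the four flag characters printed above:  "O" means that the
 * CPU is currently offline, "o" that RCU is not waiting on it for the
 * current grace period (its ->qsmaskinit bit is clear), and "N" that
 * RCU will not wait on it for the next grace period, with "." being
 * the unremarkable case for each position.  The fourth character is
 * "?" for CONFIG_IRQ_WORK=n kernels, a digit giving the number of
 * grace periods (capped at nine) for which the stall-diagnosis
 * irq_work has been pending, "!" if that irq_work last completed
 * during an earlier grace period, and "." if it completed during the
 * current one.
 */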

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	j = jiffies - READ_ONCE(rcu_state.gp_activity);
	if (j > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       READ_ONCE(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}
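
/*
 * The two-second (2 * HZ) threshold above is a heuristic:  a healthy
 * grace-period kthread is expected to update ->gp_activity much more
 * often than that while a grace period is in progress.  The
 * wake_up_process() call is a best-effort attempt to get a starved
 * kthread running again.
 */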

static void print_other_cpu_stall(unsigned long gp_seq)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       READ_ONCE(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - rcu_state.gp_start,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_stall_is_suppressed() && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall();
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);
	}
}
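
/*
 * In both branches above, the cmpxchg() on ->jiffies_stall allows at
 * most one CPU to report a given stall:  the winner also pushes the
 * timeout out by a further stall-warning interval, so all other CPUs'
 * cmpxchg() attempts fail and they return silently.
 */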

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - READ_ONCE(rcu_state.gp_activity);
	jr = j - READ_ONCE(rcu_state.gp_req_activity);
	jw = j - READ_ONCE(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->state : 0x1ffffL,
		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
		(long)READ_ONCE(rcu_state.gp_seq),
		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
		READ_ONCE(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
				 READ_ONCE(rnp->gp_seq_needed)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)READ_ONCE(rnp->gp_seq),
			(long)READ_ONCE(rnp->gp_seq_needed));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)READ_ONCE(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	/* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}
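
/*
 * Note the check/recheck pattern above:  the inexpensive lockless
 * tests are repeated after each lock acquisition because a grace
 * period might have started in the meantime, and atomic_xchg(&warned, 1)
 * ensures that at most one task emits the WARN_ON() and kthread dump.
 */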

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);