/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
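
/*
 * Illustrative sketch (not part of the original source): with both
 * detectors active, 'watchdog_enabled' holds
 * NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED == 0x3, and each
 * detector is gated on its own bit, exactly as the enable paths
 * below do:
 *
 *        if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
 *                watchdog_nmi_enable(cpu);
 */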

static DEFINE_MUTEX(watchdog_proc_mutex);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
static unsigned long hardlockup_allcpu_dumped;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
        watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
        else if (!strncmp(str, "1", 1))
                watchdog_enabled |= NMI_WATCHDOG_ENABLED;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
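
/*
 * Illustrative command-line usage, matching the parser above:
 *
 *        nmi_watchdog=panic      - panic the machine on a hard lockup
 *        nmi_watchdog=nopanic    - warn only
 *        nmi_watchdog=0          - disable the hard lockup detector
 *        nmi_watchdog=1          - enable the hard lockup detector
 */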
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_hardlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. The two
 * thresholds are therefore coupled by a fixed factor: the soft-lockup
 * threshold is twice the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
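
/*
 * Worked example (illustrative): running_clock() counts nanoseconds,
 * and 2^30 = 1073741824 ~= 10^9, so a clock value of 5368709120 ns
 * (about 5.37 s) yields 5368709120 >> 30 = 5 "seconds". The ~7%
 * error is harmless for multi-second lockup thresholds.
 */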

static void set_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns. The divide by 5
         * gives the hrtimer several chances (two or three with the
         * current relation between the soft and hard thresholds) to
         * increment before the hardlockup detector generates a warning.
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
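
/*
 * Worked example (illustrative): with the default watchdog_thresh of
 * 10 s, get_softlockup_thresh() returns 20 and sample_period becomes
 * 20 * (NSEC_PER_SEC / 5) = 4 * NSEC_PER_SEC, i.e. the per-cpu
 * hrtimer fires every 4 seconds - five times per soft-lockup window.
 */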

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
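
/*
 * Usage sketch (illustrative): code that legitimately keeps a CPU busy
 * beyond the soft-lockup threshold can keep the detector quiet by
 * touching it on each iteration:
 *
 *        while (!device_poll_done(dev))
 *                touch_softlockup_watchdog();
 *
 * where device_poll_done() is a hypothetical stand-in for whatever the
 * caller is busy-waiting on.
 */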

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done locklessly. Do we care if a 0 races with a
         * timestamp? All it means is that the softlockup check starts
         * one cycle later.
         */
        for_each_watchdog_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
        wq_watchdog_touch(-1);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled. If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        raw_cpu_write(watchdog_nmi_touch, true);
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}
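
/*
 * Illustrative timeline (values assumed): the hrtimer increments
 * hrtimer_interrupts every sample_period. Seen from two consecutive
 * NMI callbacks: saved == 7, hrint == 9 means the timer ran, so no
 * hard lockup; saved == 9, hrint == 9 means the timer never fired in
 * between - interrupts are stuck off and this CPU is hard locked up.
 */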
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, touch_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}
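
/*
 * Worked example (illustrative): with watchdog_thresh == 10 the soft
 * threshold is 20 s. For touch_ts == 100 and now == 123,
 * time_after(123, 120) is true and is_softlockup() returns 23 - the
 * stall duration in seconds that the warning in watchdog_timer_fn()
 * prints.
 */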

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup by making sure our timer interrupt is
         * incrementing. The timer interrupt should have fired multiple
         * times before the counter overflowed. If it hasn't, this is a
         * good indication the CPU is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                /*
                 * Perform all-CPU dump only once to avoid multiple hardlockups
                 * generating interleaving traces
                 */
                if (sysctl_hardlockup_all_cpu_backtrace &&
                                !test_and_set_bit(0, &hardlockup_allcpu_dumped))
                        trigger_allbutself_cpu_backtrace();

                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup by making sure a high priority task is
         * being scheduled. The task touches the watchdog to indicate it
         * is getting cpu time. If it hasn't, this is a good indication
         * some task is hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true) {
                        /*
                         * When multiple processes are causing softlockups the
                         * softlockup detector only warns on the first one
                         * because the code relies on a full quiet cycle to
                         * re-arm. The second process prevents the quiet cycle
                         * and never gets reported. Use task pointers to detect
                         * this.
                         */
                        if (__this_cpu_read(softlockup_task_ptr_saved) !=
                            current) {
                                __this_cpu_write(soft_watchdog_warn, false);
                                __touch_watchdog();
                        }
                        return HRTIMER_RESTART;
                }

                if (softlockup_all_cpu_backtrace) {
                        /* Prevent multiple soft-lockup reports if one cpu is already
                         * engaged in dumping cpu back traces
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                __this_cpu_write(softlockup_task_ptr_saved, current);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /* Avoid generating two back traces for current
                         * given that one is already made above
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();

        /*
         * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
         * failure path. Check for failures that can occur asynchronously -
         * for example, when CPUs are on-lined - and shut down the hardware
         * perf event on each CPU accordingly.
         *
         * The only non-obvious place this bit can be cleared is through
         * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
         * pr_info here would be too noisy as it would result in a message
         * every few seconds if the hardlockup was disabled but the softlockup
         * enabled.
         */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                watchdog_nmi_disable(cpu);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* nothing to do if the hard lockup detector is disabled */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                goto out;

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0, or for other cpus if cpu0 had an error */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /*
         * Disable the hard lockup detector if _any_ CPU fails to set up
         * the hardware perf event. The watchdog() function checks the
         * NMI_WATCHDOG_ENABLED bit periodically.
         *
         * The barriers are for syncing up watchdog_enabled across all the
         * cpus, as clear_bit() does not use barriers.
         */
        smp_mb__before_atomic();
        clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
        smp_mb__after_atomic();

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warn("disabled (cpu%i): hardware events not enabled\n",
                         cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                        cpu, PTR_ERR(event));

        pr_info("Shutting down hard lockup detector on all cpus\n");

        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        if (cpu == 0) {
                /* watchdog_nmi_enable() expects this to be zero initially. */
                cpu0_err = 0;
        }
}

#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
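
/*
 * Lifecycle sketch (illustrative): the smpboot core drives these hooks
 * per cpu. Registration runs .setup (watchdog_enable) on each cpu in
 * the mask; kthread_park()/kthread_unpark() trigger .park/.unpark,
 * which the suspend/resume and 'proc' update paths below rely on;
 * unregistration runs .cleanup. Each wakeup of the thread itself is
 * gated by .thread_should_run and then executes .thread_fn (watchdog).
 */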

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
        int cpu, ret = 0;

        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }

        return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
        int cpu;

        for_each_watchdog_cpu(cpu)
                kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
        int ret = 0;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);
        /*
         * Multiple suspend requests can be active in parallel (counted by
         * the 'watchdog_suspended' variable). If the watchdog threads are
         * running, the first caller takes care that they will be parked.
         * The state of 'watchdog_running' cannot change while a suspend
         * request is active (see related code in 'proc' handlers).
         */
        if (watchdog_running && !watchdog_suspended)
                ret = watchdog_park_threads();

        if (ret == 0)
                watchdog_suspended++;
        else {
                watchdog_disable_all_cpus();
                pr_err("Failed to suspend lockup detectors, disabled\n");
                watchdog_enabled = 0;
        }

        mutex_unlock(&watchdog_proc_mutex);

        return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
        mutex_lock(&watchdog_proc_mutex);

        watchdog_suspended--;
        /*
         * The watchdog threads are unparked if they were previously running
         * and if there is no more active suspend request.
         */
        if (watchdog_running && !watchdog_suspended)
                watchdog_unpark_threads();

        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
}
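
/*
 * Usage sketch (illustrative; the caller and its work are made up):
 * a subsystem brackets an operation that would otherwise trip the
 * detectors, and the requests nest via 'watchdog_suspended':
 *
 *        if (!lockup_detector_suspend()) {
 *                do_long_quiesced_work();
 *                lockup_detector_resume();
 *        }
 */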

static int update_watchdog_all_cpus(void)
{
        int ret;

        ret = watchdog_park_threads();
        if (ret)
                return ret;

        watchdog_unpark_threads();

        return 0;
}

static int watchdog_enable_all_cpus(void)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
                                                             &watchdog_cpumask);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else {
                /*
                 * Enable/disable the lockup detectors or
                 * change the sample period 'on the fly'.
                 */
                err = update_watchdog_all_cpus();

                if (err) {
                        watchdog_disable_all_cpus();
                        pr_err("Failed to update lockup detectors, disabled\n");
                }
        }

        if (err)
                watchdog_enabled = 0;

        return err;
}

static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
        int err = 0;

        /*
         * Watchdog threads won't be started if they are already active.
         * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
         * care of this. If those threads are already active, the sample
         * period will be updated and the lockup detectors will be enabled
         * or disabled 'on the fly'.
         */
        if (watchdog_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

        return err;

}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;
        int *watchdog_param = (int *)table->data;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        /*
         * If the parameter is being read return the state of the corresponding
         * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
         * run state of the lockup detectors.
         */
        if (!write) {
                *watchdog_param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (err)
                        goto out;

                /*
                 * There is a race window between fetching the current value
                 * from 'watchdog_enabled' and storing the new value. During
                 * this race window, watchdog_nmi_enable() can sneak in and
                 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
                 * The 'cmpxchg' detects this race and the loop retries.
                 */
                do {
                        old = watchdog_enabled;
                        /*
                         * If the parameter value is not zero set the
                         * corresponding bit(s), else clear it(them).
                         */
                        if (*watchdog_param)
                                new = old | which;
                        else
                                new = old & ~which;
                } while (cmpxchg(&watchdog_enabled, old, new) != old);

                /*
                 * Update the run state of the lockup detectors. There is _no_
                 * need to check the value returned by proc_watchdog_update()
                 * and to restore the previous value of 'watchdog_enabled' as
                 * both lockup detectors are disabled if proc_watchdog_update()
                 * returns an error.
                 */
                if (old == new)
                        goto out;

                err = proc_watchdog_update();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
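
/*
 * Race example (illustrative): the proc handler reads old == 0x3
 * while watchdog_nmi_enable() on another cpu clears bit 0, leaving
 * watchdog_enabled == 0x2. The cmpxchg(&watchdog_enabled, 0x3, new)
 * above then fails because the current value is no longer 0x3, and
 * the loop re-reads 'old' before applying the sysctl change.
 */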

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        old = ACCESS_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (err || !write)
                goto out;

        /*
         * Update the sample period. Restore on failure.
         */
        new = ACCESS_ONCE(watchdog_thresh);
        if (old == new)
                goto out;

        set_sample_period();
        err = proc_watchdog_update();
        if (err) {
                watchdog_thresh = old;
                set_sample_period();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write) {
                /* Remove impossible cpus to keep sysctl output cleaner. */
                cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
                            cpu_possible_mask);

                if (watchdog_running) {
                        /*
                         * Failure would be due to being unable to allocate
                         * a temporary cpumask, so we are likely not in a
                         * position to do much else to make things better.
                         */
                        if (smpboot_update_cpumask_percpu_thread(
                                    &watchdog_threads, &watchdog_cpumask) != 0)
                                pr_err("cpumask update failed\n");
                }
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
                cpumask_copy(&watchdog_cpumask, housekeeping_mask);
        } else
                cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
}