// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "watchdog: " fmt
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
static DEFINE_MUTEX(watchdog_mutex);
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}
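
/*
 * A minimal sketch of what an arch-specific override could look like
 * (hypothetical; real overrides live under arch/, e.g. the powerpc and
 * sparc NMI watchdogs). arch_arm_nmi_timer() is an invented helper:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		arch_arm_nmi_timer(cpu);
 *		return 0;
 *	}
 */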
/* Return 0 if an NMI watchdog is available, an error code otherwise. */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}
/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }
/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }
/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif
static struct cpumask watchdog_allowed_mask __read_mostly;
/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;
/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
static void __lockup_detector_cleanup(void);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we
 * couple the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
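
/*
 * Illustrative arithmetic: with the default watchdog_thresh of 10, the
 * hardlockup detector fires after ~10 seconds while the soft-lockup
 * threshold is 2 * 10 = 20 seconds.
 */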
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
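
/*
 * Illustrative: running_clock() == 16e9 ns gives 16e9 >> 30 == 14
 * "seconds" versus an exact 16 s. The ~7% error is irrelevant for
 * threshold comparisons and avoids a 64-bit divide on every sample.
 */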
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
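
/*
 * Illustrative default: watchdog_thresh = 10 gives a soft threshold of
 * 20 s, so sample_period = 20 * (1e9 / 5) ns = 4e9 ns = 4 s, i.e. the
 * "4 seconds by default" mentioned at softlockup_fn() below.
 */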
static void update_report_ts(void)
{
	__this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	update_report_ts();
}
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's watchdog
	 * report period gets restarted here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
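
/*
 * Typical use (illustrative): code that legitimately spins for a long
 * time with preemption disabled can touch the watchdog periodically so
 * the loop is not reported as a soft lockup. device_ready() is an
 * invented helper:
 *
 *	while (!device_ready(dev)) {
 *		cpu_relax();
 *		touch_softlockup_watchdog();
 *	}
 */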
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask) {
		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
		wq_watchdog_touch(cpu);
	}
}
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, period_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
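
/*
 * Worked example (illustrative, default watchdog_thresh = 10, soft
 * threshold 20): with touch_ts = period_ts = 100 and now = 121,
 * time_after(121, 120) is true and a duration of 121 - 100 = 21
 * seconds is reported.
 */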
/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
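
/*
 * Informal note: this runs from the hardlockup detector's NMI and
 * compares hrtimer_interrupts against the value saved on the previous
 * NMI. If the hrtimer has not fired in between, interrupts have been
 * blocked for a full period and a hard lockup is assumed.
 */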
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
	/* Reset the interval when touched externally by known slow code. */
	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		update_report_ts();
		return HRTIMER_RESTART;
	}
	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't then
	 * this is a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts, period_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup. Check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/*
		 * Prevent multiple soft-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}
		/* Start period for the next softlockup warning. */
		update_report_ts();

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutself_cpu_backtrace();
			clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}
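
/*
 * Informal summary of the timer path above: every sample_period the
 * hrtimer bumps hrtimer_interrupts (consumed by the hardlockup check),
 * queues the stop_machine worker that refreshes watchdog_touch_ts, and
 * compares the report timestamp against the soft threshold. Only when
 * the worker has been starved past the threshold is the BUG printout
 * emitted.
 */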
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first. That prevents a large delay
	 * between disabling the timer and disabling the perf event from
	 * causing the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}
static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}
static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}
static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}
int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}
/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty. When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}
/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}
/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}
#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}
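
/*
 * Example (illustrative shell session, assuming CONFIG_SYSCTL=y):
 *
 *	# disable only the NMI watchdog; clears NMI_WATCHDOG_ENABLED
 *	echo 0 > /proc/sys/kernel/nmi_watchdog
 *
 *	# reads return a (racy) snapshot of watchdog_enabled
 *	cat /proc/sys/kernel/watchdog
 */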
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
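
/*
 * Illustrative: raising the threshold from the default 10 s to 20 s
 * makes the soft-lockup threshold 40 s and, via set_sample_period(),
 * stretches the sample period from 4 s to 8 s:
 *
 *	echo 20 > /proc/sys/kernel/watchdog_thresh
 */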
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}