// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

static struct cpumask watchdog_allowed_mask __read_mostly;

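/*
 * Mask of CPUs the watchdog is allowed to run on, as configured by user
 * space; watchdog_cpumask_bits exposes the underlying bits for the cpumask
 * sysctl handled by proc_watchdog_cpumask() below.
 */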
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. The two
 * are coupled with a fixed factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
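	/*
	 * With the default watchdog_thresh of 10s this gives a 20s
	 * soft-lockup threshold and a 4s sample period.
	 */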
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

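/*
 * Return 0 when the soft-lockup threshold has not been exceeded, otherwise
 * the number of seconds since this CPU's watchdog timestamp was last touched.
 */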
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
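/*
 * is_hardlockup() is called from the hard-lockup detector (typically the
 * perf NMI callback). The softlockup hrtimer increments hrtimer_interrupts
 * every time it fires, so if the count has not advanced since the previous
 * check, timer interrupts have been blocked for a whole sample period and
 * the CPU is considered hard locked up.
 */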
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup: this is done by making sure a high
	 * priority task is being scheduled. The task touches the watchdog
	 * to indicate it is getting cpu time. If it hasn't, then some task
	 * is likely hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

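/*
 * Set up and start the softlockup hrtimer and, if enabled, the hard-lockup
 * (perf/NMI) event on the current CPU. Must be called on the CPU that is
 * being enabled.
 */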
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first. This prevents a large delay between
	 * disabling the timer and disabling the perf event from causing the
	 * perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}

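/*
 * Apply a configuration change: stop the NMI watchdog and all softlockup
 * timers, recompute the sample period and enable bits from the current
 * settings, then restart whatever is still enabled.
 */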
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 *
 * Nothing is started yet because watchdog_allowed_mask is empty. Once the
 * infrastructure is successfully initialized, take the proper locks and
 * start the detector on the CPUs in watchdog_cpumask if the watchdog is
 * enabled.
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for the watchdog, nmi_watchdog and soft_watchdog parameters
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

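/*
 * Early boot setup: restrict the watchdog to the housekeeping CPUs when
 * nohz_full is in use, probe for a hard-lockup detector backend and then
 * set up the softlockup machinery.
 */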
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}