git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
watchdog: update saved interrupts during check
authorMayank Rungta <mrungta@google.com>
Thu, 12 Mar 2026 23:22:03 +0000 (16:22 -0700)
committerAndrew Morton <akpm@linux-foundation.org>
Sat, 28 Mar 2026 04:19:46 +0000 (21:19 -0700)
Currently, arch_touch_nmi_watchdog() causes an early return that skips
updating hrtimer_interrupts_saved.  This leads to stale comparisons and
delayed lockup detection.

I found this issue because in our system the serial console is fairly
chatty.  For example, the 8250 console driver frequently calls
touch_nmi_watchdog() via console_write().  If a CPU locks up after a timer
interrupt but before the next watchdog check, we see the following sequence:

  * watchdog_hardlockup_check() saves counter (e.g., 1000)
  * Timer runs and updates the counter (1001)
  * touch_nmi_watchdog() is called
  * CPU locks up
  * 10s pass: check() notices touch, returns early, skips update
  * 10s pass: check() saves counter (1001)
  * 10s pass: check() finally detects lockup

This delays detection to 30 seconds.  With this fix, we detect the lockup
in 20 seconds.

Link: https://lkml.kernel.org/r/20260312-hardlockup-watchdog-fixes-v2-2-45bd8a0cc7ed@google.com
Signed-off-by: Mayank Rungta <mrungta@google.com>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Li Huafei <lihuafei1@huawei.com>
Cc: Max Kellermann <max.kellermann@ionos.com>
Cc: Shuah Khan <skhan@linuxfoundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Wang Jinchao <wangjinchao600@gmail.com>
Cc: Yunhui Cui <cuiyunhui@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
kernel/watchdog.c

index 4c5b474957455f651327832ec1e843aafe8ced69..431c540bd0354a2e5dee6097eedafa0f67f4d143 100644 (file)
@@ -159,21 +159,28 @@ void watchdog_hardlockup_touch_cpu(unsigned int cpu)
        per_cpu(watchdog_hardlockup_touched, cpu) = true;
 }
 
-static bool is_hardlockup(unsigned int cpu)
+static void watchdog_hardlockup_update(unsigned int cpu)
 {
        int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
 
-       if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
-               return true;
-
        /*
         * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
         * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
         * written/read by a single CPU.
         */
        per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+}
+
+static bool is_hardlockup(unsigned int cpu)
+{
+       int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
+
+       if (per_cpu(hrtimer_interrupts_saved, cpu) != hrint) {
+               watchdog_hardlockup_update(cpu);
+               return false;
+       }
 
-       return false;
+       return true;
 }
 
 static void watchdog_hardlockup_kick(void)
@@ -191,6 +198,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
        unsigned long flags;
 
        if (per_cpu(watchdog_hardlockup_touched, cpu)) {
+               watchdog_hardlockup_update(cpu);
                per_cpu(watchdog_hardlockup_touched, cpu) = false;
                return;
        }