git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
writeback: fix 100% CPU usage when dirtytime_expire_interval is 0
author Laveesh Bansal <laveeshb@laveeshbansal.com>
Tue, 3 Feb 2026 20:19:34 +0000 (15:19 -0500)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 6 Feb 2026 15:44:25 +0000 (16:44 +0100)
[ Upstream commit 543467d6fe97e27e22a26e367fda972dbefebbff ]

When vm.dirtytime_expire_seconds is set to 0, wakeup_dirtytime_writeback()
schedules delayed work with a delay of 0, causing immediate execution.
The function then reschedules itself with 0 delay again, creating an
infinite busy loop that causes 100% kworker CPU usage.

Fix by:
- Only scheduling delayed work in wakeup_dirtytime_writeback() when
  dirtytime_expire_interval is non-zero
- Cancelling the delayed work in dirtytime_interval_handler() when
  the interval is set to 0
- Adding a guard in start_dirtytime_writeback() for defensive coding

Tested by booting kernel in QEMU with virtme-ng:
- Before fix: kworker CPU spikes to ~73%
- After fix: CPU remains at normal levels
- Setting interval back to non-zero correctly resumes writeback

Fixes: a2f4870697a5 ("fs: make sure the timestamps for lazytime inodes eventually get written")
Cc: stable@vger.kernel.org
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220227
Signed-off-by: Laveesh Bansal <laveeshb@laveeshbansal.com>
Link: https://patch.msgid.link/20260106145059.543282-2-laveeshb@laveeshbansal.com
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
[ adapted system_percpu_wq to system_wq for the workqueue used in dirtytime_interval_handler() ]
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/fs-writeback.c

index 75e8c102c5eefcadd48fa361142b2993e4e2b838..0a36fc5e1bf2cdfcdf254ea610db7e1543ee6788 100644 (file)
@@ -2360,12 +2360,14 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
                                wb_wakeup(wb);
        }
        rcu_read_unlock();
-       schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+       if (dirtytime_expire_interval)
+               schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
 }
 
 static int __init start_dirtytime_writeback(void)
 {
-       schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+       if (dirtytime_expire_interval)
+               schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
        return 0;
 }
 __initcall(start_dirtytime_writeback);
@@ -2376,8 +2378,12 @@ int dirtytime_interval_handler(struct ctl_table *table, int write,
        int ret;
 
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-       if (ret == 0 && write)
-               mod_delayed_work(system_wq, &dirtytime_work, 0);
+       if (ret == 0 && write) {
+               if (dirtytime_expire_interval)
+                       mod_delayed_work(system_wq, &dirtytime_work, 0);
+               else
+                       cancel_delayed_work_sync(&dirtytime_work);
+       }
        return ret;
 }