writeback: Avoid softlockup when switching many inodes
author     Jan Kara <jack@suse.cz>
           Fri, 12 Sep 2025 10:38:36 +0000 (12:38 +0200)
committer  Christian Brauner <brauner@kernel.org>
           Fri, 19 Sep 2025 11:11:05 +0000 (13:11 +0200)
process_inode_switch_wbs_work() can be switching over 100 inodes to a
different cgroup. Since switching an inode requires counting all dirty and
under-writeback pages in its address space, this can take a significant
amount of time. Allow the switching loop to reschedule after processing
each inode to avoid softlockups.
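
The fix follows a generic pattern for long walks under spinlocks: drop the
locks, yield the CPU with cond_resched(), then retake the locks and continue
from where the walk left off. A minimal sketch of that pattern follows; the
names walk_items(), handle_one() and struct item are hypothetical, invented
for illustration, and are not part of the patch:

    #include <linux/spinlock.h>
    #include <linux/sched.h>

    struct item;                        /* hypothetical element type */
    void handle_one(struct item *it);   /* hypothetical per-item work */

    /*
     * Illustration of the drop/yield/relock pattern applied by the
     * patch: walk a NULL-terminated array under two spinlocks, giving
     * up the CPU between items whenever the scheduler asks for it.
     */
    static void walk_items(struct item **items, spinlock_t *a, spinlock_t *b)
    {
            struct item **itemp = items;    /* NULL-terminated array */

    relock:
            spin_lock(a);
            spin_lock(b);
            while (*itemp) {
                    handle_one(*itemp);     /* work done under both locks */
                    itemp++;
                    /*
                     * Pay the unlock/relock cost only when there is more
                     * work left and the scheduler actually wants the CPU.
                     */
                    if (*itemp && need_resched()) {
                            spin_unlock(b);
                            spin_unlock(a);
                            cond_resched();
                            goto relock;
                    }
            }
            spin_unlock(b);
            spin_unlock(a);
    }

Note that the resume point is the array cursor itself: because isw->inodes is
NULL-terminated and the cursor only ever advances, no extra bookkeeping is
needed to pick up where the previous pass stopped. This is why the patch
hoists the inodep initialization out of the for-loop and above the relock
label.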

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/fs-writeback.c

index b0e9092ccf0401489a5449392f8e522596789961..36ef1a796d4b987b2d88eced6315ffc9013d27f0 100644
@@ -500,6 +500,7 @@ static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
         */
        down_read(&bdi->wb_switch_rwsem);
 
+       inodep = isw->inodes;
        /*
         * By the time control reaches here, RCU grace period has passed
         * since I_WB_SWITCH assertion and all wb stat update transactions
@@ -510,6 +511,7 @@ static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
         * gives us exclusion against all wb related operations on @inode
         * including IO list manipulations and stat updates.
         */
+relock:
        if (old_wb < new_wb) {
                spin_lock(&old_wb->list_lock);
                spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
@@ -518,10 +520,17 @@ static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
                spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
        }
 
-       for (inodep = isw->inodes; *inodep; inodep++) {
+       while (*inodep) {
                WARN_ON_ONCE((*inodep)->i_wb != old_wb);
                if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
                        nr_switched++;
+               inodep++;
+               if (*inodep && need_resched()) {
+                       spin_unlock(&new_wb->list_lock);
+                       spin_unlock(&old_wb->list_lock);
+                       cond_resched();
+                       goto relock;
+               }
        }
 
        spin_unlock(&new_wb->list_lock);
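
The relock label sits just above the existing pointer comparison, so every
pass retakes the two list_locks in the same globally consistent order:
comparing the bdi_writeback pointers gives a stable total order, which is
what keeps two concurrent switches touching the same pair of wbs from
deadlocking ABBA-style, even now that the locks can be dropped and retaken
mid-walk. A hedged sketch of that idiom (lock_pair_ordered() is a made-up
helper name, not kernel API):

    #include <linux/spinlock.h>

    /*
     * Take two distinct spinlocks in address order so that concurrent
     * callers locking the same pair in opposite roles cannot deadlock.
     * The _nested annotation tells lockdep that the second acquisition
     * is an intentional same-class nesting, as in the patched function.
     */
    static void lock_pair_ordered(spinlock_t *x, spinlock_t *y)
    {
            if (x < y) {
                    spin_lock(x);
                    spin_lock_nested(y, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock(y);
                    spin_lock_nested(x, SINGLE_DEPTH_NESTING);
            }
    }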