fs: avoid softlockups in s_inodes iterators
author    Eric Sandeen <sandeen@redhat.com>
          Fri, 6 Dec 2019 16:54:23 +0000 (10:54 -0600)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 12 Jan 2020 11:11:59 +0000 (12:11 +0100)
[ Upstream commit 04646aebd30b99f2cfa0182435a2ec252fcb16d0 ]

Anything that walks all inodes on sb->s_inodes list without rescheduling
risks softlockups.
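The shape shared by the walkers touched below, modeled loosely on
drop_pagecache_sb() as it looks after this patch (a simplified sketch;
the function name and the exact skip checks are illustrative, and each
caller differs in detail):

	static void walk_sb_inodes(struct super_block *sb)
	{
		struct inode *inode, *toput_inode = NULL;

		spin_lock(&sb->s_inode_list_lock);
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			spin_lock(&inode->i_lock);
			if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
				/* being torn down or not set up yet: skip */
				spin_unlock(&inode->i_lock);
				continue;
			}
			__iget(inode);		/* pin the inode so it cannot go away */
			spin_unlock(&inode->i_lock);
			spin_unlock(&sb->s_inode_list_lock);

			/* per-inode work runs here with no spinlocks held */

			iput(toput_inode);	/* drop the previous pass's reference */
			toput_inode = inode;

			cond_resched();		/* the reschedule point this patch adds or moves */
			spin_lock(&sb->s_inode_list_lock);
		}
		spin_unlock(&sb->s_inode_list_lock);
		iput(toput_inode);
	}

Holding a reference across the lock drop is what keeps the inode's
i_sb_list linkage valid, so the walk can continue from the same inode
once cond_resched() returns and s_inode_list_lock is re-taken.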

Previous efforts were made in 2 functions, see:

c27d82f fs/drop_caches.c: avoid softlockups in drop_pagecache_sb()
ac05fbb inode: don't softlockup when evicting inodes

but there hasn't been an audit of all walkers, so do that now.  This
also consistently moves the cond_resched() calls to the bottom of each
loop in cases where one already exists.
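One walker below follows a different shape: invalidate_inodes() is
evicting inodes rather than operating on pinned references, so instead
of pinning the current inode it batches inodes on a local dispose list,
drops the lock to reschedule, frees the batch, and restarts the scan,
as in the fs/inode.c hunks below.  A condensed sketch of that shape
(not verbatim kernel code; the busy and dirty checks the real function
performs are omitted):

	again:
		spin_lock(&sb->s_inode_list_lock);
		list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
			spin_lock(&inode->i_lock);
			if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
			inode->i_state |= I_FREEING;
			inode_lru_list_del(inode);
			spin_unlock(&inode->i_lock);
			list_add(&inode->i_lru, &dispose);

			if (need_resched()) {
				spin_unlock(&sb->s_inode_list_lock);
				cond_resched();
				dispose_list(&dispose);
				goto again;
			}
		}
		spin_unlock(&sb->s_inode_list_lock);
		dispose_list(&dispose);

Restarting from the head is safe because everything queued so far is
marked I_FREEING (and freed by dispose_list() before the restart), so
the next pass does not revisit it.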

One loop remains: remove_dquot_ref(), because I'm not quite sure how
to deal with that one without taking the i_lock.
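For illustration only (this fragment is not part of the patch, and
whether it would actually be safe inside remove_dquot_ref() is exactly
the open question above): taking the i_lock to pin the current inode
is what lets the other walkers drop s_inode_list_lock and reschedule:

		spin_lock(&inode->i_lock);
		__iget(inode);		/* pin: can't be freed while referenced */
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		cond_resched();		/* safe: the pin keeps i_sb_list valid */

		spin_lock(&sb->s_inode_list_lock);
		/* ... continue the walk; iput() the pinned inode later ... */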

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/drop_caches.c
fs/inode.c
fs/notify/fsnotify.c
fs/quota/dquot.c

diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index d31b6c72b47646fd8ae2ebd3746de307f8017fa0..dc1a1d5d825b48de17f3192f8d0c50829dd07885 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
                spin_unlock(&inode->i_lock);
                spin_unlock(&sb->s_inode_list_lock);
 
-               cond_resched();
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                iput(toput_inode);
                toput_inode = inode;
 
+               cond_resched();
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);
diff --git a/fs/inode.c b/fs/inode.c
index 76f7535fe7541390431bdf51026b584a3036bea8..d2a700c5efce8c9fd6a91c43ebca8f230d5fd084 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -656,6 +656,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
        struct inode *inode, *next;
        LIST_HEAD(dispose);
 
+again:
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
@@ -678,6 +679,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
+               if (need_resched()) {
+                       spin_unlock(&sb->s_inode_list_lock);
+                       cond_resched();
+                       dispose_list(&dispose);
+                       goto again;
+               }
        }
        spin_unlock(&sb->s_inode_list_lock);
 
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 506da82ff3f14915e6afc75af79570aeb85d981f..a308f7a7e57704787c9fe816aae5440b7ea37289 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -90,6 +90,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
 
                iput_inode = inode;
 
+               cond_resched();
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 3fdbdd29702bc26f6d1d2160d41ca2e466f21406..30f5da8f4affafd0ccbee08ecd65103ee7cf14c1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -976,6 +976,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
                 * later.
                 */
                old_inode = inode;
+               cond_resched();
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);