fs: make insert_inode_locked() wait for inode destruction
author    Mateusz Guzik <mjguzik@gmail.com>
          Wed, 14 Jan 2026 09:47:16 +0000 (10:47 +0100)
committer Christian Brauner <brauner@kernel.org>
          Wed, 14 Jan 2026 16:05:35 +0000 (17:05 +0100)
This is the only routine which skipped inodes being destroyed instead of waiting for them.

The current behavior is arguably a bug as it results in a corner case
where the inode hash can have *two* matching inodes, one of which is on
its way out.

Ironing out this difference is an incremental step towards sanitizing
the API.

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://patch.msgid.link/20260114094717.236202-1-mjguzik@gmail.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
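
[Editorial note, not part of the patch: a hedged, caller-side sketch of how a filesystem typically uses insert_inode_locked() during inode allocation. The function example_new_inode() and its error handling are illustrative placeholders; only new_inode(), insert_inode_locked(), iput() and unlock_new_inode() are real VFS interfaces. With this change, a same-numbered inode still in I_FREEING/I_WILL_FREE is waited on rather than skipped, so the hash cannot briefly hold two matching entries.]

#include <linux/fs.h>
#include <linux/err.h>

/*
 * Illustrative sketch only: allocate an inode, publish it in the inode
 * hash with I_NEW set, and unlock it once fully initialized.
 */
static struct inode *example_new_inode(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = ino;

	/*
	 * Hash the inode. Returns -EBUSY if a live inode with the same
	 * number is already present; after this commit, an inode that is
	 * merely on its way out is waited for instead of being skipped.
	 */
	if (insert_inode_locked(inode) < 0) {
		iput(inode);
		return ERR_PTR(-EBUSY);
	}

	/* ... initialize the rest of the inode ... */

	unlock_new_inode(inode);	/* clear I_NEW and wake any waiters */
	return inode;
}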
fs/inode.c

index d317637dc3b46535f5622a0335f6bef35b0972bf..de398c63fb7235335b10ff039b1255f4c8eb40bc 100644 (file)
@@ -1028,19 +1028,20 @@ long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
        return freed;
 }
 
-static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
+static void __wait_on_freeing_inode(struct inode *inode, bool hash_locked, bool rcu_locked);
+
 /*
  * Called with the inode lock held.
  */
 static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
-                               void *data, bool is_inode_hash_locked,
+                               void *data, bool hash_locked,
                                bool *isnew)
 {
        struct inode *inode = NULL;
 
-       if (is_inode_hash_locked)
+       if (hash_locked)
                lockdep_assert_held(&inode_hash_lock);
        else
                lockdep_assert_not_held(&inode_hash_lock);
@@ -1054,7 +1055,7 @@ repeat:
                        continue;
                spin_lock(&inode->i_lock);
                if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
-                       __wait_on_freeing_inode(inode, is_inode_hash_locked);
+                       __wait_on_freeing_inode(inode, hash_locked, true);
                        goto repeat;
                }
                if (unlikely(inode_state_read(inode) & I_CREATING)) {
@@ -1078,11 +1079,11 @@ repeat:
  */
 static struct inode *find_inode_fast(struct super_block *sb,
                                struct hlist_head *head, unsigned long ino,
-                               bool is_inode_hash_locked, bool *isnew)
+                               bool hash_locked, bool *isnew)
 {
        struct inode *inode = NULL;
 
-       if (is_inode_hash_locked)
+       if (hash_locked)
                lockdep_assert_held(&inode_hash_lock);
        else
                lockdep_assert_not_held(&inode_hash_lock);
@@ -1096,7 +1097,7 @@ repeat:
                        continue;
                spin_lock(&inode->i_lock);
                if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE)) {
-                       __wait_on_freeing_inode(inode, is_inode_hash_locked);
+                       __wait_on_freeing_inode(inode, hash_locked, true);
                        goto repeat;
                }
                if (unlikely(inode_state_read(inode) & I_CREATING)) {
@@ -1832,16 +1833,13 @@ int insert_inode_locked(struct inode *inode)
        while (1) {
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
+repeat:
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
-                       if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
-                               spin_unlock(&old->i_lock);
-                               continue;
-                       }
                        break;
                }
                if (likely(!old)) {
@@ -1852,6 +1850,11 @@ int insert_inode_locked(struct inode *inode)
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
+               if (inode_state_read(old) & (I_FREEING | I_WILL_FREE)) {
+                       __wait_on_freeing_inode(old, true, false);
+                       old = NULL;
+                       goto repeat;
+               }
                if (unlikely(inode_state_read(old) & I_CREATING)) {
                        spin_unlock(&old->i_lock);
                        spin_unlock(&inode_hash_lock);
@@ -2504,16 +2507,18 @@ EXPORT_SYMBOL(inode_needs_sync);
  * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
  * will DTRT.
  */
-static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
+static void __wait_on_freeing_inode(struct inode *inode, bool hash_locked, bool rcu_locked)
 {
        struct wait_bit_queue_entry wqe;
        struct wait_queue_head *wq_head;
 
+       VFS_BUG_ON(!hash_locked && !rcu_locked);
+
        /*
         * Handle racing against evict(), see that routine for more details.
         */
        if (unlikely(inode_unhashed(inode))) {
-               WARN_ON(is_inode_hash_locked);
+               WARN_ON(hash_locked);
                spin_unlock(&inode->i_lock);
                return;
        }
@@ -2521,14 +2526,16 @@ static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_lock
        wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
        prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
-       rcu_read_unlock();
-       if (is_inode_hash_locked)
+       if (rcu_locked)
+               rcu_read_unlock();
+       if (hash_locked)
                spin_unlock(&inode_hash_lock);
        schedule();
        finish_wait(wq_head, &wqe.wq_entry);
-       if (is_inode_hash_locked)
+       if (hash_locked)
                spin_lock(&inode_hash_lock);
-       rcu_read_lock();
+       if (rcu_locked)
+               rcu_read_lock();
 }
 
 static __initdata unsigned long ihash_entries;