]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
fs: cosmetic fixes to lru handling
authorMateusz Guzik <mjguzik@gmail.com>
Wed, 29 Oct 2025 13:14:28 +0000 (14:14 +0100)
committerChristian Brauner <brauner@kernel.org>
Tue, 25 Nov 2025 09:34:49 +0000 (10:34 +0100)
1. inode_bit_waitqueue() was somehow placed between __inode_add_lru() and
   inode_add_lru(). Move it up.
2. assert ->i_lock is held in __inode_add_lru instead of just claiming it is
   needed
3. s/__inode_add_lru/__inode_lru_list_add/ for consistency with itself
   (inode_lru_list_del()) and similar routines for sb and io list
   management
4. push list presence check into inode_lru_list_del(), just like sb and
   io list

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://patch.msgid.link/20251029131428.654761-2-mjguzik@gmail.com
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/fs-writeback.c
fs/inode.c
include/linux/fs.h
mm/filemap.c
mm/truncate.c
mm/vmscan.c
mm/workingset.c

index f784d8b09b04996c697ef988578f10a07b7fbdb6..c00b72e2d3392701c8b0ea7099bd0a261e902c55 100644 (file)
@@ -1452,7 +1452,7 @@ static void inode_sync_complete(struct inode *inode)
 
        inode_state_clear(inode, I_SYNC);
        /* If inode is clean an unused, put it into LRU now... */
-       inode_add_lru(inode);
+       inode_lru_list_add(inode);
        /* Called with inode->i_lock which ensures memory ordering. */
        inode_wake_up_bit(inode, __I_SYNC);
 }
index 80298f048117aec93ce0511f56f562d341f8b1d1..7229a56732c6d21e890c90caee18b2e81accf34e 100644 (file)
@@ -530,23 +530,6 @@ void ihold(struct inode *inode)
 }
 EXPORT_SYMBOL(ihold);
 
-static void __inode_add_lru(struct inode *inode, bool rotate)
-{
-       if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
-               return;
-       if (icount_read(inode))
-               return;
-       if (!(inode->i_sb->s_flags & SB_ACTIVE))
-               return;
-       if (!mapping_shrinkable(&inode->i_data))
-               return;
-
-       if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
-               this_cpu_inc(nr_unused);
-       else if (rotate)
-               inode_state_set(inode, I_REFERENCED);
-}
-
 struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
                                            struct inode *inode, u32 bit)
 {
@@ -584,18 +567,38 @@ void wait_on_new_inode(struct inode *inode)
 }
 EXPORT_SYMBOL(wait_on_new_inode);
 
+static void __inode_lru_list_add(struct inode *inode, bool rotate)
+{
+       lockdep_assert_held(&inode->i_lock);
+
+       if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
+               return;
+       if (icount_read(inode))
+               return;
+       if (!(inode->i_sb->s_flags & SB_ACTIVE))
+               return;
+       if (!mapping_shrinkable(&inode->i_data))
+               return;
+
+       if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
+               this_cpu_inc(nr_unused);
+       else if (rotate)
+               inode_state_set(inode, I_REFERENCED);
+}
+
 /*
  * Add inode to LRU if needed (inode is unused and clean).
- *
- * Needs inode->i_lock held.
  */
-void inode_add_lru(struct inode *inode)
+void inode_lru_list_add(struct inode *inode)
 {
-       __inode_add_lru(inode, false);
+       __inode_lru_list_add(inode, false);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
+       if (list_empty(&inode->i_lru))
+               return;
+
        if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_dec(nr_unused);
 }
@@ -1920,7 +1923,7 @@ static void iput_final(struct inode *inode)
        if (!drop &&
            !(inode_state_read(inode) & I_DONTCACHE) &&
            (sb->s_flags & SB_ACTIVE)) {
-               __inode_add_lru(inode, true);
+               __inode_lru_list_add(inode, true);
                spin_unlock(&inode->i_lock);
                return;
        }
@@ -1944,8 +1947,7 @@ static void iput_final(struct inode *inode)
                inode_state_replace(inode, I_WILL_FREE, I_FREEING);
        }
 
-       if (!list_empty(&inode->i_lru))
-               inode_lru_list_del(inode);
+       inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);
 
        evict(inode);
index a813abdcf21800468052f783847961d0b2390198..33129cda3a998529428caaaaaf56a1eb2486ab97 100644 (file)
@@ -3502,7 +3502,7 @@ static inline void remove_inode_hash(struct inode *inode)
 }
 
 extern void inode_sb_list_add(struct inode *inode);
-extern void inode_add_lru(struct inode *inode);
+extern void inode_lru_list_add(struct inode *inode);
 
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);
index 13f0259d993c9c0431e51b22a42fe0f644a4546a..add5228a7d9715693c055422d133964093e1498e 100644 (file)
@@ -256,7 +256,7 @@ void filemap_remove_folio(struct folio *folio)
        __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
-               inode_add_lru(mapping->host);
+               inode_lru_list_add(mapping->host);
        spin_unlock(&mapping->host->i_lock);
 
        filemap_free_folio(mapping, folio);
@@ -335,7 +335,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
        page_cache_delete_batch(mapping, fbatch);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
-               inode_add_lru(mapping->host);
+               inode_lru_list_add(mapping->host);
        spin_unlock(&mapping->host->i_lock);
 
        for (i = 0; i < folio_batch_count(fbatch); i++)
index 91eb92a5ce4fdcf110a3fdbc2abfaefe532a42fa..ad9c0fa29d94126f28ffb4991be35e9bb78f445a 100644 (file)
@@ -46,7 +46,7 @@ static void clear_shadow_entries(struct address_space *mapping,
 
        xas_unlock_irq(&xas);
        if (mapping_shrinkable(mapping))
-               inode_add_lru(mapping->host);
+               inode_lru_list_add(mapping->host);
        spin_unlock(&mapping->host->i_lock);
 }
 
@@ -111,7 +111,7 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 
        xas_unlock_irq(&xas);
        if (mapping_shrinkable(mapping))
-               inode_add_lru(mapping->host);
+               inode_lru_list_add(mapping->host);
        spin_unlock(&mapping->host->i_lock);
 out:
        folio_batch_remove_exceptionals(fbatch);
@@ -622,7 +622,7 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
        __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
-               inode_add_lru(mapping->host);
+               inode_lru_list_add(mapping->host);
        spin_unlock(&mapping->host->i_lock);
 
        filemap_free_folio(mapping, folio);
index b2fc8b626d3dff9d34c2d747c4798e67a344305e..bb4a96c7b682a3263cb41f7fd45982fec025c2d2 100644 (file)
@@ -811,7 +811,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
                __filemap_remove_folio(folio, shadow);
                xa_unlock_irq(&mapping->i_pages);
                if (mapping_shrinkable(mapping))
-                       inode_add_lru(mapping->host);
+                       inode_lru_list_add(mapping->host);
                spin_unlock(&mapping->host->i_lock);
 
                if (free_folio)
index 68a76a91111f4f649a5c1c70c1392a622f218a63..d32dc2e02a6159a63e0a508960229c1064f8f12b 100644 (file)
@@ -755,7 +755,7 @@ out_invalid:
        xa_unlock_irq(&mapping->i_pages);
        if (mapping->host != NULL) {
                if (mapping_shrinkable(mapping))
-                       inode_add_lru(mapping->host);
+                       inode_lru_list_add(mapping->host);
                spin_unlock(&mapping->host->i_lock);
        }
        ret = LRU_REMOVED_RETRY;