inode_state_clear(inode, I_SYNC);
/* If inode is clean and unused, put it into LRU now... */
- inode_add_lru(inode);
+ inode_lru_list_add(inode);
/* Called with inode->i_lock held, which ensures memory ordering. */
inode_wake_up_bit(inode, __I_SYNC);
}
}
EXPORT_SYMBOL(ihold);
-static void __inode_add_lru(struct inode *inode, bool rotate)
-{
- if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
- return;
- if (icount_read(inode))
- return;
- if (!(inode->i_sb->s_flags & SB_ACTIVE))
- return;
- if (!mapping_shrinkable(&inode->i_data))
- return;
-
- if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
- this_cpu_inc(nr_unused);
- else if (rotate)
- inode_state_set(inode, I_REFERENCED);
-}
-
struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
struct inode *inode, u32 bit)
{
}
EXPORT_SYMBOL(wait_on_new_inode);
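+/*
+ * Add an unused, clean inode to its superblock's LRU list; with @rotate
+ * set, an inode already on the list is marked I_REFERENCED instead.
+ * Inodes that are dirty, under sync, being freed, still referenced,
+ * on an inactive superblock, or whose page cache still pins them
+ * (see mapping_shrinkable()) stay off the list.
+ */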
+static void __inode_lru_list_add(struct inode *inode, bool rotate)
+{
+ lockdep_assert_held(&inode->i_lock);
+
+ if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
+ return;
+ if (icount_read(inode))
+ return;
+ if (!(inode->i_sb->s_flags & SB_ACTIVE))
+ return;
+ if (!mapping_shrinkable(&inode->i_data))
+ return;
+
+ if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
+ this_cpu_inc(nr_unused);
+ else if (rotate)
+ inode_state_set(inode, I_REFERENCED);
+}
+
/*
* Add inode to LRU if needed (inode is unused and clean).
- *
- * Needs inode->i_lock held.
*/
-void inode_add_lru(struct inode *inode)
+void inode_lru_list_add(struct inode *inode)
{
- __inode_add_lru(inode, false);
+ __inode_lru_list_add(inode, false);
}
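+/*
+ * Remove the inode from its superblock's LRU list, if it is on one.
+ * The list_empty() check lets callers skip the list_lru lock for
+ * inodes that were never added.
+ */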
static void inode_lru_list_del(struct inode *inode)
{
+ if (list_empty(&inode->i_lru))
+ return;
+
if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_dec(nr_unused);
}
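+/*
+ * Clean, unused and still cacheable on an active superblock:
+ * keep the inode on the LRU instead of evicting it.
+ */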
if (!drop &&
!(inode_state_read(inode) & I_DONTCACHE) &&
(sb->s_flags & SB_ACTIVE)) {
- __inode_add_lru(inode, true);
+ __inode_lru_list_add(inode, true);
spin_unlock(&inode->i_lock);
return;
}
inode_state_replace(inode, I_WILL_FREE, I_FREEING);
}
- if (!list_empty(&inode->i_lru))
- inode_lru_list_del(inode);
+ inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
evict(inode);
}
extern void inode_sb_list_add(struct inode *inode);
-extern void inode_add_lru(struct inode *inode);
+extern void inode_lru_list_add(struct inode *inode);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
__filemap_remove_folio(folio, NULL);
xa_unlock_irq(&mapping->i_pages);
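+/*
+ * Removing folios may leave nothing that pins the inode; if so,
+ * add it to the LRU so the inode itself can be reclaimed.
+ */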
if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
+ inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
filemap_free_folio(mapping, folio);
page_cache_delete_batch(mapping, fbatch);
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
+ inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
for (i = 0; i < folio_batch_count(fbatch); i++)
xas_unlock_irq(&xas);
if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
+ inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
}
xas_unlock_irq(&xas);
if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
+ inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
out:
folio_batch_remove_exceptionals(fbatch);
__filemap_remove_folio(folio, NULL);
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
+ inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
filemap_free_folio(mapping, folio);
__filemap_remove_folio(folio, shadow);
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
+ inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
if (free_folio)
xa_unlock_irq(&mapping->i_pages);
if (mapping->host != NULL) {
if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
+ inode_lru_list_add(mapping->host);
spin_unlock(&mapping->host->i_lock);
}
ret = LRU_REMOVED_RETRY;