filemap: convert replace_page_cache_page() to replace_page_cache_folio()
Author:     Vishal Moola (Oracle) <vishal.moola@gmail.com>
AuthorDate: Tue, 1 Nov 2022 17:53:22 +0000 (10:53 -0700)
Committer:  Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 12 Dec 2022 02:12:12 +0000 (18:12 -0800)
Patch series "Removing the lru_cache_add() wrapper".

This patchset replaces all calls to lru_cache_add() with the folio
equivalent: folio_add_lru().  This allows us to get rid of the wrapper.
The series passes xfstests and the userfaultfd selftests.

This patch (of 5):

Converts replace_page_cache_page() to replace_page_cache_folio(),
eliminating 7 calls to compound_head().
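
For context on where those savings come from: page flag tests such as
PageLocked() must first resolve a possible tail page via compound_head(),
while the folio variants read the flag from the folio (head page) directly.
The sketch below is a simplified userspace model of that difference, not the
real kernel macros; the struct layouts and the way the head page is encoded
here are illustrative only.

	/*
	 * Simplified model of why folio conversion removes compound_head()
	 * calls.  NOT the kernel implementation; layouts are illustrative.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PG_locked 0

	struct page {
		unsigned long flags;
		struct page *head;	/* stand-in for the real compound_head() encoding */
	};

	struct folio {
		struct page page;	/* a folio is never a tail page */
	};

	/* Page API: every flag test pays for a head-page lookup first. */
	static struct page *compound_head(struct page *page)
	{
		return page->head ? page->head : page;
	}

	static bool PageLocked(struct page *page)
	{
		return compound_head(page)->flags & (1UL << PG_locked);
	}

	/* Folio API: the flag lives on the folio itself, no lookup needed. */
	static bool folio_test_locked(struct folio *folio)
	{
		return folio->page.flags & (1UL << PG_locked);
	}

	int main(void)
	{
		struct folio f = { .page = { .flags = 1UL << PG_locked } };
		struct page tail = { .head = &f.page };

		printf("PageLocked(tail)      = %d\n", PageLocked(&tail));
		printf("folio_test_locked(&f) = %d\n", folio_test_locked(&f));
		return 0;
	}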

Link: https://lkml.kernel.org/r/20221101175326.13265-1-vishal.moola@gmail.com
Link: https://lkml.kernel.org/r/20221101175326.13265-2-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/fuse/dev.c
include/linux/pagemap.h
mm/filemap.c

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index b4a6e0a1b945aaf82eb3f141a2b188307379da7e..26817a2db4639cc582749eb205f560c303d38393 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -837,7 +837,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;
 
-       replace_page_cache_page(oldpage, newpage);
+       replace_page_cache_folio(page_folio(oldpage), page_folio(newpage));
 
        get_page(newpage);
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2ec0ca1f3d38968338d2df6713e659495bb6a4fe..29e1f9e76eb6ddb8a2f3ec596499372865f3053a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1102,7 +1102,7 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp);
 void filemap_remove_folio(struct folio *folio);
 void __filemap_remove_folio(struct folio *folio, void *shadow);
-void replace_page_cache_page(struct page *old, struct page *new);
+void replace_page_cache_folio(struct folio *old, struct folio *new);
 void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct folio_batch *fbatch);
 bool filemap_release_folio(struct folio *folio, gfp_t gfp);
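
Given the new prototype above, a caller is expected to hold the lock on both
folios and to add the new folio to the LRU itself, as the kerneldoc in the
mm/filemap.c hunk below states.  The following is a minimal, hypothetical
caller sketch under those assumptions; the surrounding function, its name,
and how the folios are obtained are invented for illustration, and error
handling and the caller's own refcounting are elided.

	/* Hypothetical caller sketch; not taken from the kernel tree. */
	#include <linux/pagemap.h>	/* replace_page_cache_folio(), folio_lock() */
	#include <linux/swap.h>		/* folio_add_lru() */

	static void example_replace(struct folio *old, struct folio *new)
	{
		/* Both folios must be locked across the replacement. */
		folio_lock(old);
		folio_lock(new);

		/*
		 * Atomically swaps @old for @new in the page cache; cannot fail.
		 * Takes the page cache reference on @new and drops it on @old.
		 */
		replace_page_cache_folio(old, new);

		/* The replacement does not touch the LRU; that is up to the caller. */
		folio_add_lru(new);

		folio_unlock(new);
		folio_unlock(old);
	}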
diff --git a/mm/filemap.c b/mm/filemap.c
index 242cd8bd8330441a6ccdd69a5cb4d0ee96f85b2a..c4d4ace9cc700300d966328d9769b5527d91ca8b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -788,56 +788,54 @@ int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
 EXPORT_SYMBOL(file_write_and_wait_range);
 
 /**
- * replace_page_cache_page - replace a pagecache page with a new one
- * @old:       page to be replaced
- * @new:       page to replace with
- *
- * This function replaces a page in the pagecache with a new one.  On
- * success it acquires the pagecache reference for the new page and
- * drops it for the old page.  Both the old and new pages must be
- * locked.  This function does not add the new page to the LRU, the
+ * replace_page_cache_folio - replace a pagecache folio with a new one
+ * @old:       folio to be replaced
+ * @new:       folio to replace with
+ *
+ * This function replaces a folio in the pagecache with a new one.  On
+ * success it acquires the pagecache reference for the new folio and
+ * drops it for the old folio.  Both the old and new folios must be
+ * locked.  This function does not add the new folio to the LRU, the
  * caller must do that.
  *
  * The remove + add is atomic.  This function cannot fail.
  */
-void replace_page_cache_page(struct page *old, struct page *new)
+void replace_page_cache_folio(struct folio *old, struct folio *new)
 {
-       struct folio *fold = page_folio(old);
-       struct folio *fnew = page_folio(new);
        struct address_space *mapping = old->mapping;
        void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
        pgoff_t offset = old->index;
        XA_STATE(xas, &mapping->i_pages, offset);
 
-       VM_BUG_ON_PAGE(!PageLocked(old), old);
-       VM_BUG_ON_PAGE(!PageLocked(new), new);
-       VM_BUG_ON_PAGE(new->mapping, new);
+       VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
+       VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
+       VM_BUG_ON_FOLIO(new->mapping, new);
 
-       get_page(new);
+       folio_get(new);
        new->mapping = mapping;
        new->index = offset;
 
-       mem_cgroup_migrate(fold, fnew);
+       mem_cgroup_migrate(old, new);
 
        xas_lock_irq(&xas);
        xas_store(&xas, new);
 
        old->mapping = NULL;
        /* hugetlb pages do not participate in page cache accounting. */
-       if (!PageHuge(old))
-               __dec_lruvec_page_state(old, NR_FILE_PAGES);
-       if (!PageHuge(new))
-               __inc_lruvec_page_state(new, NR_FILE_PAGES);
-       if (PageSwapBacked(old))
-               __dec_lruvec_page_state(old, NR_SHMEM);
-       if (PageSwapBacked(new))
-               __inc_lruvec_page_state(new, NR_SHMEM);
+       if (!folio_test_hugetlb(old))
+               __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
+       if (!folio_test_hugetlb(new))
+               __lruvec_stat_add_folio(new, NR_FILE_PAGES);
+       if (folio_test_swapbacked(old))
+               __lruvec_stat_sub_folio(old, NR_SHMEM);
+       if (folio_test_swapbacked(new))
+               __lruvec_stat_add_folio(new, NR_SHMEM);
        xas_unlock_irq(&xas);
        if (free_folio)
-               free_folio(fold);
-       folio_put(fold);
+               free_folio(old);
+       folio_put(old);
 }
-EXPORT_SYMBOL_GPL(replace_page_cache_page);
+EXPORT_SYMBOL_GPL(replace_page_cache_folio);
 
 noinline int __filemap_add_folio(struct address_space *mapping,
                struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)