git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm: convert hugetlb_page_mapping_lock_write to folio
authorMatthew Wilcox (Oracle) <willy@infradead.org>
Fri, 12 Apr 2024 19:35:03 +0000 (20:35 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 6 May 2024 00:53:46 +0000 (17:53 -0700)
The page is only used to get the mapping, so the folio will do just as
well.  Both callers already have a folio available, so this saves a call
to compound_head().

Link: https://lkml.kernel.org/r/20240412193510.2356957-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/memory-failure.c
mm/migrate.c

index 1bc93e7e315bb4a3ffcbe37a421d8fa8d2b092e3..68244bb3637a877f9fff4ad0437bab0ef5b29547 100644 (file)
@@ -178,7 +178,7 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
                                 struct vm_area_struct *vma,
                                 unsigned long address);
 
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages[MAX_NUMNODES];
@@ -297,8 +297,8 @@ static inline unsigned long hugetlb_total_pages(void)
        return 0;
 }
 
-static inline struct address_space *hugetlb_page_mapping_lock_write(
-                                                       struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+                                                       struct folio *folio)
 {
        return NULL;
 }
index 3b7d5ddc32ad7afcb06755e7268269d8f0de71ea..417fc5cdb6eebd25caa6fb5d5bbe8eaae6a0f9cc 100644 (file)
@@ -2155,13 +2155,13 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
  * stable.  Due to locking order, we can only trylock_write.  If we can
  * not get the lock, simply return NULL to caller.
  */
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 {
-       struct address_space *mapping = page_mapping(hpage);
+       struct address_space *mapping = folio_mapping(folio);
 
        if (!mapping)
                return mapping;
index 4daf581e3878431b9f9dd43c0f43a4d852a087c1..1a5f3403dd2a9058de8420d545c366de2c41b50c 100644 (file)
@@ -1624,7 +1624,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                 * TTU_RMAP_LOCKED to indicate we have taken the lock
                 * at this higher level.
                 */
-               mapping = hugetlb_page_mapping_lock_write(hpage);
+               mapping = hugetlb_folio_mapping_lock_write(folio);
                if (mapping) {
                        try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
                        i_mmap_unlock_write(mapping);
index c7692f303fa7333642871164f9656f057720f6e0..dd04f578c19c3e26cc4c299e340db7a897cc3bfe 100644 (file)
@@ -1425,7 +1425,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
                         * semaphore in write mode here and set TTU_RMAP_LOCKED
                         * to let lower levels know we have taken the lock.
                         */
-                       mapping = hugetlb_page_mapping_lock_write(&src->page);
+                       mapping = hugetlb_folio_mapping_lock_write(src);
                        if (unlikely(!mapping))
                                goto unlock_put_anon;