mm: convert do_set_pmd() to take a folio
author	Baolin Wang <baolin.wang@linux.alibaba.com>	Mon, 12 May 2025 02:57:12 +0000 (10:57 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>	Thu, 22 May 2025 21:55:37 +0000 (14:55 -0700)
In do_set_pmd(), we always use folio->page to build PMD mappings for
the entire folio.  Since all callers of do_set_pmd() already hold a stable
folio, converting do_set_pmd() to take a folio is safe and more
straightforward.

In addition, to ensure the extensibility of do_set_pmd() for supporting
larger folios beyond PMD size, we keep the 'page' parameter to specify
which page within the folio should be mapped.

No functional changes expected.
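
For illustration, the caller-side pattern after this change looks like the
sketch below (a simplified paraphrase of the mm/filemap.c hunk further down,
not a standalone function): the caller already holds a stable folio, resolves
the page within that folio which backs the faulting offset, and passes both
to do_set_pmd().

	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
		/* Pick the page inside the held folio that backs offset 'start'. */
		struct page *page = folio_file_page(folio, start);
		vm_fault_t ret = do_set_pmd(vmf, folio, page);

		if (!ret) {
			/* PMD mapping installed; the folio reference is consumed. */
			folio_unlock(folio);
			return true;
		}
	}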

Link: https://lkml.kernel.org/r/9b488f4ecb4d3fd8634e3d448dd0ed6964482480.1747017104.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/filemap.c
mm/khugepaged.c
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a916ea42cfd5cb7d216dab82aede209ab843a441..cd2e513189d601d297f0e8628c17fab0dc18342f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1235,7 +1235,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
        return pte;
 }
 
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
                struct page *page, unsigned int nr, unsigned long addr);
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 7b90cbeb4a1adfe44da53ea76a60ccc2f0e9609d..09d005848f0d163c3358a2a16447efa5a7952e19 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3533,7 +3533,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
 
        if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
                struct page *page = folio_file_page(folio, start);
-               vm_fault_t ret = do_set_pmd(vmf, page);
+               vm_fault_t ret = do_set_pmd(vmf, folio, page);
                if (!ret) {
                        /* The page is mapped successfully, reference consumed. */
                        folio_unlock(folio);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 33daea8f667e888e813715a62afb1649e4b8c1b1..ebcd7c8a4b445046c3d79558a317dfdd16b9a075 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1478,7 +1478,7 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        mmap_assert_locked(vma->vm_mm);
 
-       if (do_set_pmd(&vmf, page))
+       if (do_set_pmd(&vmf, folio, page))
                return SCAN_FAIL;
 
        folio_get(folio);
diff --git a/mm/memory.c b/mm/memory.c
index 4cf4adb0de266d9935c77a1e9b055a0cd294265d..5cb48f262ab01273f6c00a74027c4a242723e987 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5227,9 +5227,8 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
        vmf->prealloc_pte = NULL;
 }
 
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
 {
-       struct folio *folio = page_folio(page);
        struct vm_area_struct *vma = vmf->vma;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
@@ -5302,7 +5301,7 @@ out:
        return ret;
 }
 #else
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
 {
        return VM_FAULT_FALLBACK;
 }
@@ -5396,6 +5395,7 @@ fallback:
        else
                page = vmf->page;
 
+       folio = page_folio(page);
        /*
         * check even for read faults because we might have lost our CoWed
         * page
@@ -5407,8 +5407,8 @@ fallback:
        }
 
        if (pmd_none(*vmf->pmd)) {
-               if (PageTransCompound(page)) {
-                       ret = do_set_pmd(vmf, page);
+               if (folio_test_pmd_mappable(folio)) {
+                       ret = do_set_pmd(vmf, folio, page);
                        if (ret != VM_FAULT_FALLBACK)
                                return ret;
                }
@@ -5419,7 +5419,6 @@ fallback:
                        return VM_FAULT_OOM;
        }
 
-       folio = page_folio(page);
        nr_pages = folio_nr_pages(folio);
 
        /*