git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: huge_memory: use a folio in do_huge_pmd_numa_page()
Author: Kefeng Wang <wangkefeng.wang@huawei.com>
Thu, 21 Sep 2023 07:44:13 +0000 (15:44 +0800)
Committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 16 Oct 2023 22:44:37 +0000 (15:44 -0700)
Use a folio in do_huge_pmd_numa_page(), reduce three page_folio() calls to
one, no functional change intended.

Link: https://lkml.kernel.org/r/20230921074417.24004-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 5c20e43782e40e11a08f50f14699813bb06958cc..5baf9b6dc52249e5f935f6cfffbe0041b17d6b76 100644 (file)
@@ -1517,9 +1517,9 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        pmd_t oldpmd = vmf->orig_pmd;
        pmd_t pmd;
-       struct page *page;
+       struct folio *folio;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-       int page_nid = NUMA_NO_NODE;
+       int nid = NUMA_NO_NODE;
        int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
        bool migrated = false, writable = false;
        int flags = 0;
@@ -1541,36 +1541,34 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
            can_change_pmd_writable(vma, vmf->address, pmd))
                writable = true;
 
-       page = vm_normal_page_pmd(vma, haddr, pmd);
-       if (!page)
+       folio = vm_normal_folio_pmd(vma, haddr, pmd);
+       if (!folio)
                goto out_map;
 
        /* See similar comment in do_numa_page for explanation */
        if (!writable)
                flags |= TNF_NO_GROUP;
 
-       page_nid = page_to_nid(page);
+       nid = folio_nid(folio);
        /*
         * For memory tiering mode, cpupid of slow memory page is used
         * to record page access time.  So use default value.
         */
-       if (node_is_toptier(page_nid))
-               last_cpupid = page_cpupid_last(page);
-       target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
-                                      &flags);
-
+       if (node_is_toptier(nid))
+               last_cpupid = page_cpupid_last(&folio->page);
+       target_nid = numa_migrate_prep(&folio->page, vma, haddr, nid, &flags);
        if (target_nid == NUMA_NO_NODE) {
-               put_page(page);
+               folio_put(folio);
                goto out_map;
        }
 
        spin_unlock(vmf->ptl);
        writable = false;
 
-       migrated = migrate_misplaced_folio(page_folio(page), vma, target_nid);
+       migrated = migrate_misplaced_folio(folio, vma, target_nid);
        if (migrated) {
                flags |= TNF_MIGRATED;
-               page_nid = target_nid;
+               nid = target_nid;
        } else {
                flags |= TNF_MIGRATE_FAIL;
                vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -1582,9 +1580,8 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
        }
 
 out:
-       if (page_nid != NUMA_NO_NODE)
-               task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
-                               flags);
+       if (nid != NUMA_NO_NODE)
+               task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
 
        return 0;