mm: mprotect: avoid unnecessary struct page accessing if pte_protnone()
author    Kefeng Wang <wangkefeng.wang@huawei.com>
          Thu, 23 Oct 2025 11:37:35 +0000 (19:37 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 17 Nov 2025 01:28:03 +0000 (17:28 -0800)
If pte_protnone() is true, we can avoid unnecessary struct page accesses
and reduce the cache footprint when scanning page tables for prot_numa.
A similar change was made before; see commit a818f5363a0e ("autonuma:
reduce cache footprint when scanning page tables").

Link: https://lkml.kernel.org/r/20251023113737.3572790-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
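
[Editor's note] The change is purely about ordering: test the PTE bits
first, and only dereference struct page / struct folio when that cheap
check does not already settle the question. Below is a minimal userspace
model of the pattern; every name and type in it (pte_t, struct page,
PTE_PROTNONE, vm_normal_page_model, scan) is an illustrative stand-in,
not the real kernel definition.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types; not the real definitions. */
struct page { int nid; };                /* dereferencing this is the cost we avoid */
typedef struct { unsigned long val; } pte_t;

#define PTE_PROTNONE 0x1UL               /* pretend PROT_NONE marker bit */

static bool pte_protnone(pte_t pte)
{
        return pte.val & PTE_PROTNONE;   /* cheap: reads only the PTE itself */
}

static struct page *vm_normal_page_model(pte_t pte, struct page *backing)
{
        (void)pte;
        return backing;                  /* in the kernel this touches the memmap */
}

/*
 * Model of the reordered prot_numa scan: the PTE-only test runs first,
 * so entries that are already PROT_NONE never pull struct page into the
 * cache.
 */
static int scan(pte_t *ptes, struct page *pages, size_t n, int target_node)
{
        int updated = 0;

        for (size_t i = 0; i < n; i++) {
                /* Already in the desired state: skip before touching the page. */
                if (pte_protnone(ptes[i]))
                        continue;

                struct page *page = vm_normal_page_model(ptes[i], &pages[i]);
                if (!page || page->nid == target_node)
                        continue;

                ptes[i].val |= PTE_PROTNONE;   /* stand-in for the PTE update */
                updated++;
        }
        return updated;
}

int main(void)
{
        pte_t ptes[4] = { { PTE_PROTNONE }, { 0 }, { 0 }, { PTE_PROTNONE } };
        struct page pages[4] = { { 0 }, { 0 }, { 1 }, { 1 } };

        printf("updated %d ptes\n", scan(ptes, pages, 4, /* target_node */ 1));
        return 0;
}
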
mm/mprotect.c

index 056986d9076a846a72314832f76bc09031f6a500..6236d120c8e6d5779db2b5caef084216570c54fc 100644
@@ -118,18 +118,13 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
        return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
 }
 
-static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
-                          pte_t oldpte, pte_t *pte, int target_node,
-                          struct folio *folio)
+static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
+               struct folio *folio)
 {
        bool ret = true;
        bool toptier;
        int nid;
 
-       /* Avoid TLB flush if possible */
-       if (pte_protnone(oldpte))
-               goto skip;
-
        if (!folio)
                goto skip;
 
@@ -307,23 +302,25 @@ static long change_pte_range(struct mmu_gather *tlb,
                        struct page *page;
                        pte_t ptent;
 
+                       /* Already in the desired state. */
+                       if (prot_numa && pte_protnone(oldpte))
+                               continue;
+
                        page = vm_normal_page(vma, addr, oldpte);
                        if (page)
                                folio = page_folio(page);
+
                        /*
                         * Avoid trapping faults against the zero or KSM
                         * pages. See similar comment in change_huge_pmd.
                         */
-                       if (prot_numa) {
-                               int ret = prot_numa_skip(vma, addr, oldpte, pte,
-                                                        target_node, folio);
-                               if (ret) {
+                       if (prot_numa &&
+                           prot_numa_skip(vma, target_node, folio)) {
 
-                                       /* determine batch to skip */
-                                       nr_ptes = mprotect_folio_pte_batch(folio,
-                                                 pte, oldpte, max_nr_ptes, /* flags = */ 0);
-                                       continue;
-                               }
+                               /* determine batch to skip */
+                               nr_ptes = mprotect_folio_pte_batch(folio,
+                                         pte, oldpte, max_nr_ptes, /* flags = */ 0);
+                               continue;
                        }
 
                        nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
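
[Editor's note] For readers who prefer the applied result over the
hunks, the touched regions read as follows with the patch in place.
This is reconstructed only from the context and added lines above;
elided code is marked with "...".

static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
                struct folio *folio)
{
        bool ret = true;
        bool toptier;
        int nid;

        if (!folio)
                goto skip;
        ...

and, inside the scan loop of change_pte_range():

                        struct page *page;
                        pte_t ptent;

                        /* Already in the desired state. */
                        if (prot_numa && pte_protnone(oldpte))
                                continue;

                        page = vm_normal_page(vma, addr, oldpte);
                        if (page)
                                folio = page_folio(page);

                        /*
                         * Avoid trapping faults against the zero or KSM
                         * pages. See similar comment in change_huge_pmd.
                         */
                        if (prot_numa &&
                            prot_numa_skip(vma, target_node, folio)) {

                                /* determine batch to skip */
                                nr_ptes = mprotect_folio_pte_batch(folio,
                                          pte, oldpte, max_nr_ptes, /* flags = */ 0);
                                continue;
                        }

                        nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);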