]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/rmap: mlock large folios in try_to_unmap_one()
authorKiryl Shutsemau <kas@kernel.org>
Tue, 23 Sep 2025 11:07:08 +0000 (12:07 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Sun, 28 Sep 2025 18:51:30 +0000 (11:51 -0700)
Currently, try_to_unmap_one() only tries to mlock small folios.

Use logic similar to folio_referenced_one() to mlock large folios: only do
this for fully mapped folios and under page table lock that protects all
page table entries.

[akpm@linux-foundation.org: s/CROSSSED/CROSSED/]
Link: https://lkml.kernel.org/r/20250923110711.690639-4-kirill@shutemov.name
Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index d174168b8f93366c807202122bb2438dac9b8aa6..92eeb3866494247256143c10093b73d4ae94aa20 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1870,6 +1870,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
        unsigned long nr_pages = 1, end_addr;
        unsigned long pfn;
        unsigned long hsz = 0;
+       int ptes = 0;
 
        /*
         * When racing against e.g. zap_pte_range() on another cpu,
@@ -1910,10 +1911,34 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                 */
                if (!(flags & TTU_IGNORE_MLOCK) &&
                    (vma->vm_flags & VM_LOCKED)) {
+                       ptes++;
+
+                       /*
+                        * Set 'ret' to indicate the page cannot be unmapped.
+                        *
+                        * Do not jump to walk_abort immediately as additional
+                        * iteration might be required to detect fully mapped
+                        * folio and mlock it.
+                        */
+                       ret = false;
+
+                       /* Only mlock fully mapped pages */
+                       if (pvmw.pte && ptes != pvmw.nr_pages)
+                               continue;
+
+                       /*
+                        * All PTEs must be protected by page table lock in
+                        * order to mlock the page.
+                        *
+                        * If a page table boundary has been crossed, the
+                        * current ptl only protects part of the ptes.
+                        */
+                       if (pvmw.flags & PVMW_PGTABLE_CROSSED)
+                               goto walk_done;
+
                        /* Restore the mlock which got missed */
-                       if (!folio_test_large(folio))
-                               mlock_vma_folio(folio, vma);
-                       goto walk_abort;
+                       mlock_vma_folio(folio, vma);
+                       goto walk_done;
                }
 
                if (!pvmw.pte) {