git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: split can_change_pte_writable() into private and shared parts
author: Dev Jain <dev.jain@arm.com>
Fri, 18 Jul 2025 09:02:42 +0000 (14:32 +0530)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 25 Jul 2025 02:12:41 +0000 (19:12 -0700)
In preparation for patch 6 and modularizing the code in general, split
can_change_pte_writable() into private and shared VMA parts.  No
functional change intended.

Link: https://lkml.kernel.org/r/20250718090244.21092-6-dev.jain@arm.com
Signed-off-by: Dev Jain <dev.jain@arm.com>
Suggested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Cc: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mprotect.c

index 4977f198168ef2779b4b46afffb45c88c7673f7e..a1c7d8a4648d69f97469b21721e250df3162adf7 100644 (file)
 
 #include "internal.h"
 
-bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
-                            pte_t pte)
+static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte)
 {
-       struct page *page;
-
        if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
                return false;
 
@@ -60,16 +57,32 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
        if (userfaultfd_pte_wp(vma, pte))
                return false;
 
-       if (!(vma->vm_flags & VM_SHARED)) {
-               /*
-                * Writable MAP_PRIVATE mapping: We can only special-case on
-                * exclusive anonymous pages, because we know that our
-                * write-fault handler similarly would map them writable without
-                * any additional checks while holding the PT lock.
-                */
-               page = vm_normal_page(vma, addr, pte);
-               return page && PageAnon(page) && PageAnonExclusive(page);
-       }
+       return true;
+}
+
+static bool can_change_private_pte_writable(struct vm_area_struct *vma,
+                                           unsigned long addr, pte_t pte)
+{
+       struct page *page;
+
+       if (!maybe_change_pte_writable(vma, pte))
+               return false;
+
+       /*
+        * Writable MAP_PRIVATE mapping: We can only special-case on
+        * exclusive anonymous pages, because we know that our
+        * write-fault handler similarly would map them writable without
+        * any additional checks while holding the PT lock.
+        */
+       page = vm_normal_page(vma, addr, pte);
+       return page && PageAnon(page) && PageAnonExclusive(page);
+}
+
+static bool can_change_shared_pte_writable(struct vm_area_struct *vma,
+                                          pte_t pte)
+{
+       if (!maybe_change_pte_writable(vma, pte))
+               return false;
 
        VM_WARN_ON_ONCE(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte));
 
@@ -83,6 +96,15 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
        return pte_dirty(pte);
 }
 
+bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
+                            pte_t pte)
+{
+       if (!(vma->vm_flags & VM_SHARED))
+               return can_change_private_pte_writable(vma, addr, pte);
+
+       return can_change_shared_pte_writable(vma, pte);
+}
+
 static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
                                    pte_t pte, int max_nr_ptes)
 {