git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: centralize+fix comments about compound_mapcount() in new sync_with_folio_pmd_zap()
author: David Hildenbrand (Arm) <david@kernel.org>
Mon, 23 Feb 2026 16:39:20 +0000 (17:39 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:03 +0000 (13:53 -0700)
We still mention compound_mapcount() in two comments.

Instead of simply referring to the folio mapcount in both places, let's
factor out the odd-looking PTL sync into sync_with_folio_pmd_zap(), and
add centralized documentation why this is required.

[akpm@linux-foundation.org: update comment per Matthew and David]
Link: https://lkml.kernel.org/r/20260223163920.287720-1-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/memory.c
mm/page_vma_mapped.c

index cb0af847d7d99d81e4c363ba76b54697cb242ee3..39ab37bb0e1dd604509d1bc49f51729189d7b0d2 100644 (file)
@@ -516,6 +516,25 @@ void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
 
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
+/**
+ * sync_with_folio_pmd_zap - sync with concurrent zapping of a folio PMD
+ * @mm: The mm_struct.
+ * @pmdp: Pointer to the pmd that was found to be pmd_none().
+ *
+ * When we find a pmd_none() while unmapping a folio without holding the PTL,
+ * zap_huge_pmd() may have cleared the PMD but not yet modified the folio to
+ * indicate that it's unmapped. Skipping the PMD without synchronization could
+ * make folio unmapping code assume that unmapping failed.
+ *
+ * Wait for concurrent zapping to complete by grabbing the PTL.
+ */
+static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
+{
+       spinlock_t *ptl = pmd_lock(mm, pmdp);
+
+       spin_unlock(ptl);
+}
+
 struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
index af26a697562bb7cee942fa7d3b185dd8d31024dc..f78ab3869f8db2c82d03b07d01064e75240a3812 100644 (file)
@@ -1993,13 +1993,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                } else if (details && details->single_folio &&
                           folio_test_pmd_mappable(details->single_folio) &&
                           next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
-                       spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
-                       /*
-                        * Take and drop THP pmd lock so that we cannot return
-                        * prematurely, while zap_huge_pmd() has cleared *pmd,
-                        * but not yet decremented compound_mapcount().
-                        */
-                       spin_unlock(ptl);
+                       sync_with_folio_pmd_zap(tlb->mm, pmd);
                }
                if (pmd_none(*pmd)) {
                        addr = next;
index b38a1d00c971b161383ffd8078f41a42ebe81eb8..a4d52fdb3056d5e9d562a3ca5d4c2208ae341172 100644 (file)
@@ -269,11 +269,6 @@ restart:
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                } else if (!pmd_present(pmde)) {
-                       /*
-                        * If PVMW_SYNC, take and drop THP pmd lock so that we
-                        * cannot return prematurely, while zap_huge_pmd() has
-                        * cleared *pmd but not decremented compound_mapcount().
-                        */
                        const softleaf_t entry = softleaf_from_pmd(pmde);
 
                        if (softleaf_is_device_private(entry)) {
@@ -284,11 +279,9 @@ restart:
                        if ((pvmw->flags & PVMW_SYNC) &&
                            thp_vma_suitable_order(vma, pvmw->address,
                                                   PMD_ORDER) &&
-                           (pvmw->nr_pages >= HPAGE_PMD_NR)) {
-                               spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+                           (pvmw->nr_pages >= HPAGE_PMD_NR))
+                               sync_with_folio_pmd_zap(mm, pvmw->pmd);
 
-                               spin_unlock(ptl);
-                       }
                        step_forward(pvmw, PMD_SIZE);
                        continue;
                }