We still mention compound_mapcount() in two comments.
Instead of simply referring to the folio mapcount in both places, let's
factor out the odd-looking PTL sync into sync_with_folio_pmd_zap() and
add centralized documentation of why it is required.
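
For illustration, the race that the take-and-drop of the PTL closes, as
a simplified sketch ("clear *pmd" and "update folio mapcount" stand in
for the actual zap_huge_pmd() internals):

	CPU 0 (zap_huge_pmd())		CPU 1 (folio unmap path)
	----------------------		------------------------
	ptl = pmd_lock(mm, pmd);
	clear *pmd
					pmd_none(*pmd) == true; without
					synchronization we would skip the
					PMD while the folio still appears
					mapped, and assume that unmapping
					failed
	update folio mapcount
	spin_unlock(ptl);
					sync_with_folio_pmd_zap(mm, pmd);
					... blocks until CPU 0 drops the
					PTL; the folio state is now stable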
[akpm@linux-foundation.org: update comment per Matthew and David]
Link: https://lkml.kernel.org/r/20260223163920.287720-1-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/internal.h
+++ b/mm/internal.h
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
+/**
+ * sync_with_folio_pmd_zap - sync with concurrent zapping of a folio PMD
+ * @mm: The mm_struct.
+ * @pmdp: Pointer to the pmd that was found to be pmd_none().
+ *
+ * When we find a pmd_none() while unmapping a folio without holding the PTL,
+ * zap_huge_pmd() may have cleared the PMD but not yet modified the folio to
+ * indicate that it's unmapped. Skipping the PMD without synchronization could
+ * make folio unmapping code assume that unmapping failed.
+ *
+ * Wait for concurrent zapping to complete by grabbing the PTL.
+ */
+static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
+{
+	spinlock_t *ptl = pmd_lock(mm, pmdp);
+
+	spin_unlock(ptl);
+}
+
 struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
		       struct vm_area_struct *vma,
--- a/mm/memory.c
+++ b/mm/memory.c
 		} else if (details && details->single_folio &&
 			   folio_test_pmd_mappable(details->single_folio) &&
 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
-			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
-			/*
-			 * Take and drop THP pmd lock so that we cannot return
-			 * prematurely, while zap_huge_pmd() has cleared *pmd,
-			 * but not yet decremented compound_mapcount().
-			 */
-			spin_unlock(ptl);
+			sync_with_folio_pmd_zap(tlb->mm, pmd);
 		}
 		if (pmd_none(*pmd)) {
 			addr = next;
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
 			spin_unlock(pvmw->ptl);
 			pvmw->ptl = NULL;
 		} else if (!pmd_present(pmde)) {
-			/*
-			 * If PVMW_SYNC, take and drop THP pmd lock so that we
-			 * cannot return prematurely, while zap_huge_pmd() has
-			 * cleared *pmd but not decremented compound_mapcount().
-			 */
 			const softleaf_t entry = softleaf_from_pmd(pmde);

 			if (softleaf_is_device_private(entry)) {
 			if ((pvmw->flags & PVMW_SYNC) &&
 			    thp_vma_suitable_order(vma, pvmw->address,
						   PMD_ORDER) &&
-			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
-				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
-
-				spin_unlock(ptl);
-			}
+			    (pvmw->nr_pages >= HPAGE_PMD_NR))
+				sync_with_folio_pmd_zap(mm, pvmw->pmd);
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}