From: Nico Pache Date: Wed, 25 Mar 2026 11:40:22 +0000 (-0600) Subject: mm/khugepaged: unify khugepaged and madv_collapse with collapse_single_pmd() X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a155d945b73c5b0668e898df5495afe45bb261cd;p=thirdparty%2Flinux.git mm/khugepaged: unify khugepaged and madv_collapse with collapse_single_pmd() The khugepaged daemon and madvise_collapse have two different implementations that do almost the same thing. Create collapse_single_pmd() to increase code reuse and create an entry point for these two users. Refactor madvise_collapse and collapse_scan_mm_slot to use the new collapse_single_pmd function. To help reduce confusion around the mmap_locked variable, we rename mmap_locked to lock_dropped in the collapse_scan_mm_slot() function, and remove the redundant mmap_locked in madvise_collapse(); this further improves the code's readability. The SCAN_PTE_MAPPED_HUGEPAGE enum is no longer reachable in the madvise_collapse() function, so we drop it from the list of "continuing" enums. This introduces a minor behavioral change that most likely fixes an undiscovered bug. The current implementation of khugepaged tests collapse_test_exit_or_disable() before calling collapse_pte_mapped_thp, but we weren't doing it in the madvise_collapse case. By unifying these two callers, madvise_collapse now also performs this check. We also modify the return value to be SCAN_ANY_PROCESS, which properly indicates that this process is no longer valid to operate on. By moving the madvise_collapse writeback-retry logic into the helper function, we can also avoid having to revalidate the VMA. We guard the khugepaged_pages_collapsed variable to ensure it's only incremented for khugepaged. As requested, we also convert a VM_BUG_ON to a VM_WARN_ON. 
Link: https://lkml.kernel.org/r/20260325114022.444081-6-npache@redhat.com Signed-off-by: Nico Pache Reviewed-by: Lorenzo Stoakes (Oracle) Reviewed-by: Lance Yang Reviewed-by: Baolin Wang Acked-by: David Hildenbrand (Arm) Cc: Alistair Popple Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Barry Song Cc: Brendan Jackman Cc: Byungchul Park Cc: Catalin Marinas Cc: David Rientjes Cc: Dev Jain Cc: Gregory Price Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Jan Kara Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Joshua Hahn Cc: Kefeng Wang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Matthew Brost Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Mike Rapoport Cc: Nanyong Sun Cc: Pedro Falcato Cc: Peter Xu Cc: Rafael Aquini Cc: Rakie Kim Cc: Randy Dunlap Cc: Ryan Roberts Cc: Shivank Garg Cc: Steven Rostedt Cc: Suren Baghdasaryan Cc: Takashi Iwai (SUSE) Cc: Thomas Hellström Cc: Usama Arif Cc: Vishal Moola (Oracle) Cc: Vlastimil Babka Cc: Wei Yang Cc: Will Deacon Cc: Yang Shi Cc: Zach O'Keefe Cc: Zi Yan Signed-off-by: Andrew Morton --- diff --git a/mm/khugepaged.c b/mm/khugepaged.c index c99e330949631..d21348b85a59f 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1250,7 +1250,7 @@ out_nolock: static enum scan_result collapse_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long start_addr, - bool *mmap_locked, struct collapse_control *cc) + bool *lock_dropped, struct collapse_control *cc) { pmd_t *pmd; pte_t *pte, *_pte; @@ -1425,7 +1425,7 @@ out_unmap: result = collapse_huge_page(mm, start_addr, referenced, unmapped, cc); /* collapse_huge_page will return with the mmap_lock released */ - *mmap_locked = false; + *lock_dropped = true; } out: trace_mm_khugepaged_scan_pmd(mm, folio, referenced, @@ -2417,6 +2417,67 @@ static enum scan_result collapse_scan_file(struct mm_struct *mm, return result; } +/* + * Try to collapse a single PMD starting at a PMD aligned addr, and return + * the results. 
+ */ +static enum scan_result collapse_single_pmd(unsigned long addr, + struct vm_area_struct *vma, bool *lock_dropped, + struct collapse_control *cc) +{ + struct mm_struct *mm = vma->vm_mm; + bool triggered_wb = false; + enum scan_result result; + struct file *file; + pgoff_t pgoff; + + mmap_assert_locked(mm); + + if (vma_is_anonymous(vma)) { + result = collapse_scan_pmd(mm, vma, addr, lock_dropped, cc); + goto end; + } + + file = get_file(vma->vm_file); + pgoff = linear_page_index(vma, addr); + + mmap_read_unlock(mm); + *lock_dropped = true; +retry: + result = collapse_scan_file(mm, addr, file, pgoff, cc); + + /* + * For MADV_COLLAPSE, when encountering dirty pages, try to writeback, + * then retry the collapse one time. + */ + if (!cc->is_khugepaged && result == SCAN_PAGE_DIRTY_OR_WRITEBACK && + !triggered_wb && mapping_can_writeback(file->f_mapping)) { + const loff_t lstart = (loff_t)pgoff << PAGE_SHIFT; + const loff_t lend = lstart + HPAGE_PMD_SIZE - 1; + + filemap_write_and_wait_range(file->f_mapping, lstart, lend); + triggered_wb = true; + goto retry; + } + fput(file); + + if (result == SCAN_PTE_MAPPED_HUGEPAGE) { + mmap_read_lock(mm); + if (collapse_test_exit_or_disable(mm)) + result = SCAN_ANY_PROCESS; + else + result = try_collapse_pte_mapped_thp(mm, addr, + !cc->is_khugepaged); + if (result == SCAN_PMD_MAPPED) + result = SCAN_SUCCEED; + mmap_read_unlock(mm); + } +end: + if (cc->is_khugepaged && result == SCAN_SUCCEED) + ++khugepaged_pages_collapsed; + return result; +} + static void collapse_scan_mm_slot(unsigned int progress_max, enum scan_result *result, struct collapse_control *cc) __releases(&khugepaged_mm_lock) @@ -2478,46 +2539,21 @@ static void collapse_scan_mm_slot(unsigned int progress_max, VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); while (khugepaged_scan.address < hend) { - bool mmap_locked = true; + bool lock_dropped = false; cond_resched(); if (unlikely(collapse_test_exit_or_disable(mm))) goto breakouterloop; - 
VM_BUG_ON(khugepaged_scan.address < hstart || + VM_WARN_ON_ONCE(khugepaged_scan.address < hstart || khugepaged_scan.address + HPAGE_PMD_SIZE > hend); - if (!vma_is_anonymous(vma)) { - struct file *file = get_file(vma->vm_file); - pgoff_t pgoff = linear_page_index(vma, - khugepaged_scan.address); - - mmap_read_unlock(mm); - mmap_locked = false; - *result = collapse_scan_file(mm, - khugepaged_scan.address, file, pgoff, cc); - fput(file); - if (*result == SCAN_PTE_MAPPED_HUGEPAGE) { - mmap_read_lock(mm); - if (collapse_test_exit_or_disable(mm)) - goto breakouterloop; - *result = try_collapse_pte_mapped_thp(mm, - khugepaged_scan.address, false); - if (*result == SCAN_PMD_MAPPED) - *result = SCAN_SUCCEED; - mmap_read_unlock(mm); - } - } else { - *result = collapse_scan_pmd(mm, vma, - khugepaged_scan.address, &mmap_locked, cc); - } - - if (*result == SCAN_SUCCEED) - ++khugepaged_pages_collapsed; + *result = collapse_single_pmd(khugepaged_scan.address, + vma, &lock_dropped, cc); /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; - if (!mmap_locked) + if (lock_dropped) /* * We released mmap_lock so break loop. 
Note * that we drop mmap_lock before all hugepage @@ -2792,7 +2828,6 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start, unsigned long hstart, hend, addr; enum scan_result last_fail = SCAN_FAIL; int thps = 0; - bool mmap_locked = true; BUG_ON(vma->vm_start > start); BUG_ON(vma->vm_end < end); @@ -2814,13 +2849,11 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start, for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) { enum scan_result result = SCAN_FAIL; - bool triggered_wb = false; -retry: - if (!mmap_locked) { + if (*lock_dropped) { cond_resched(); mmap_read_lock(mm); - mmap_locked = true; + *lock_dropped = false; result = hugepage_vma_revalidate(mm, addr, false, &vma, cc); if (result != SCAN_SUCCEED) { @@ -2830,45 +2863,14 @@ retry: hend = min(hend, vma->vm_end & HPAGE_PMD_MASK); } - mmap_assert_locked(mm); - if (!vma_is_anonymous(vma)) { - struct file *file = get_file(vma->vm_file); - pgoff_t pgoff = linear_page_index(vma, addr); - mmap_read_unlock(mm); - mmap_locked = false; - *lock_dropped = true; - result = collapse_scan_file(mm, addr, file, pgoff, cc); - - if (result == SCAN_PAGE_DIRTY_OR_WRITEBACK && !triggered_wb && - mapping_can_writeback(file->f_mapping)) { - loff_t lstart = (loff_t)pgoff << PAGE_SHIFT; - loff_t lend = lstart + HPAGE_PMD_SIZE - 1; - - filemap_write_and_wait_range(file->f_mapping, lstart, lend); - triggered_wb = true; - fput(file); - goto retry; - } - fput(file); - } else { - result = collapse_scan_pmd(mm, vma, addr, &mmap_locked, cc); - } - if (!mmap_locked) - *lock_dropped = true; + result = collapse_single_pmd(addr, vma, lock_dropped, cc); -handle_result: switch (result) { case SCAN_SUCCEED: case SCAN_PMD_MAPPED: ++thps; break; - case SCAN_PTE_MAPPED_HUGEPAGE: - BUG_ON(mmap_locked); - mmap_read_lock(mm); - result = try_collapse_pte_mapped_thp(mm, addr, true); - mmap_read_unlock(mm); - goto handle_result; /* Whitelisted set of results where continuing OK */ case SCAN_NO_PTE_TABLE: case 
SCAN_PTE_NON_PRESENT: @@ -2891,7 +2893,7 @@ handle_result: out_maybelock: /* Caller expects us to hold mmap_lock on return */ - if (!mmap_locked) + if (*lock_dropped) mmap_read_lock(mm); out_nolock: mmap_assert_locked(mm);