From: SeongJae Park
Date: Wed, 12 Nov 2025 15:41:06 +0000 (-0800)
Subject: mm/damon/vaddr: cleanup using pmd_trans_huge_lock()
X-Git-Tag: v6.19-rc1~112^2~110
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=96549d56b89744bc4e9e221bd8abf089c9004d29;p=thirdparty%2Fkernel%2Flinux.git

mm/damon/vaddr: cleanup using pmd_trans_huge_lock()

Three pmd walk functions in vaddr.c use pmd_trans_huge() and pmd_lock()
to handle THPs.  Simplify the code by replacing the two function calls
with a single pmd_trans_huge_lock() call.

Note that this cleanup not only reduces the lines of code, but also
simplifies the code execution flow for the migration entries case, as
kindly explained [1] by Hugh, who suggested this cleanup.

[sj@kernel.org: provide lvalue to pmd_present()]
  Link: https://lkml.kernel.org/r/20251117154415.11041-1-sj@kernel.org
Link: https://lkml.kernel.org/r/20251112154114.66053-4-sj@kernel.org
Link: https://lore.kernel.org/296c2b3f-6748-158f-b85d-2952165c0588@google.com [1]
Signed-off-by: SeongJae Park
Suggested-by: Hugh Dickins
Cc: Bill Wendling
Cc: Brendan Higgins
Cc: David Gow
Cc: David Hildenbrand
Cc: Jonathan Corbet
Cc: Justin Stitt
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Michal Hocko
Cc: Miguel Ojeda
Cc: Mike Rapoport
Cc: Nathan Chancellor
Cc: Shuah Khan
Cc: Suren Baghdasaryan
Cc: Vlastimil Babka
Cc: kernel test robot
Signed-off-by: Andrew Morton
---

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 7e834467b2d81..83d9b09c86a86 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -307,24 +307,16 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
 	pte_t *pte;
-	pmd_t pmde;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge(pmdp_get(pmd))) {
-		ptl = pmd_lock(walk->mm, pmd);
-		pmde = pmdp_get(pmd);
-
-		if (!pmd_present(pmde)) {
-			spin_unlock(ptl);
-			return 0;
-		}
+	ptl = pmd_trans_huge_lock(pmd, walk->vma);
+	if (ptl) {
+		pmd_t pmde = pmdp_get(pmd);
 
-		if (pmd_trans_huge(pmde)) {
+		if (pmd_present(pmde))
 			damon_pmdp_mkold(pmd, walk->vma, addr);
-			spin_unlock(ptl);
-			return 0;
-		}
 		spin_unlock(ptl);
+		return 0;
 	}
 
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
@@ -446,21 +438,12 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct damon_young_walk_private *priv = walk->private;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge(pmdp_get(pmd))) {
-		pmd_t pmde;
-
-		ptl = pmd_lock(walk->mm, pmd);
-		pmde = pmdp_get(pmd);
+	ptl = pmd_trans_huge_lock(pmd, walk->vma);
+	if (ptl) {
+		pmd_t pmde = pmdp_get(pmd);
 
-		if (!pmd_present(pmde)) {
-			spin_unlock(ptl);
-			return 0;
-		}
-
-		if (!pmd_trans_huge(pmde)) {
-			spin_unlock(ptl);
-			goto regular_page;
-		}
+		if (!pmd_present(pmde))
+			goto huge_out;
 		folio = damon_get_folio(pmd_pfn(pmde));
 		if (!folio)
 			goto huge_out;
@@ -474,8 +457,6 @@ huge_out:
 		spin_unlock(ptl);
 		return 0;
 	}
-
-regular_page:
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
@@ -910,13 +891,10 @@ static int damos_va_stat_pmd_entry(pmd_t *pmd, unsigned long addr,
 	int nr;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge(*pmd)) {
-		pmd_t pmde;
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
+		pmd_t pmde = pmdp_get(pmd);
 
-		ptl = pmd_trans_huge_lock(pmd, vma);
-		if (!ptl)
-			return 0;
-		pmde = pmdp_get(pmd);
 		if (!pmd_present(pmde))
 			goto huge_unlock;
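
For reference, the locking pattern the hunks above converge on looks
roughly like the minimal C sketch below.  It mirrors the converted
damon_mkold_pmd_entry(); sketch_pmd_entry() is a hypothetical callback
name used only for illustration, and the sketch is not a verbatim copy
of any function in mm/damon/vaddr.c:

	/*
	 * Minimal sketch of the pmd_trans_huge_lock() pattern; a
	 * hypothetical mm_walk pmd_entry callback, not kernel code.
	 */
	static int sketch_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
	{
		spinlock_t *ptl;

		/*
		 * pmd_trans_huge_lock() takes the pmd lock and returns it
		 * only when the pmd may map a huge page (including a
		 * non-present entry such as a migration entry); otherwise
		 * it returns NULL without holding the lock.  This folds
		 * the old pmd_trans_huge() check, pmd_lock() call, and
		 * under-lock re-check into a single call.
		 */
		ptl = pmd_trans_huge_lock(pmd, walk->vma);
		if (ptl) {
			pmd_t pmde = pmdp_get(pmd);

			/* Migration entries are !pmd_present(); skip them. */
			if (pmd_present(pmde))
				damon_pmdp_mkold(pmd, walk->vma, addr);
			spin_unlock(ptl);
			return 0;
		}

		/* Not a huge pmd: fall through to per-pte processing. */
		return 0;
	}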