mm/khugepaged: use start_addr/addr for improved readability
author    Wei Yang <richard.weiyang@gmail.com>
          Mon, 22 Sep 2025 14:09:38 +0000 (14:09 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
          Sun, 28 Sep 2025 18:51:34 +0000 (11:51 -0700)
When collapsing a pmd, there are two addresses in use:

  * one address points to the start of the pmd
  * another address points to each individual page

The current naming makes it difficult to distinguish these two and is
hence error prone.

Considering the plan to support mTHP collapse, name the first one
`start_addr' and the second `addr' for better readability and
consistency.
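
In miniature, the new convention reads like this (an illustrative
sketch distilled from the hunks below, not kernel code as-is;
inspect_pte() is a hypothetical stand-in for the per-page work):

	static void walk_pmd_range(struct vm_area_struct *vma,
				   unsigned long start_addr, pte_t *pte)
	{
		unsigned long addr = start_addr;	/* per-page cursor */
		pte_t *_pte;

		/*
		 * start_addr stays fixed at the pmd-aligned base of the
		 * range; addr advances one PAGE_SIZE per pte entry.
		 */
		for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
		     _pte++, addr += PAGE_SIZE)
			inspect_pte(vma, addr, _pte);	/* hypothetical helper */
	}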

Link: https://lkml.kernel.org/r/20250922140938.27343-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Nico Pache <npache@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 52786ffef80a1aad512fc03c3d0494949be55587..7ab2d1a42df336341f279baec352d350d6616ee0 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -537,18 +537,19 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
-                                       unsigned long address,
+                                       unsigned long start_addr,
                                        pte_t *pte,
                                        struct collapse_control *cc,
                                        struct list_head *compound_pagelist)
 {
        struct page *page = NULL;
        struct folio *folio = NULL;
+       unsigned long addr = start_addr;
        pte_t *_pte;
        int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
 
        for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
-            _pte++, address += PAGE_SIZE) {
+            _pte++, addr += PAGE_SIZE) {
                pte_t pteval = ptep_get(_pte);
                if (pte_none(pteval) || (pte_present(pteval) &&
                                is_zero_pfn(pte_pfn(pteval)))) {
@@ -571,7 +572,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        result = SCAN_PTE_UFFD_WP;
                        goto out;
                }
-               page = vm_normal_page(vma, address, pteval);
+               page = vm_normal_page(vma, addr, pteval);
                if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
                        result = SCAN_PAGE_NULL;
                        goto out;
@@ -656,8 +657,8 @@ next:
                 */
                if (cc->is_khugepaged &&
                    (pte_young(pteval) || folio_test_young(folio) ||
-                    folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
-                                                                    address)))
+                    folio_test_referenced(folio) ||
+                    mmu_notifier_test_young(vma->vm_mm, addr)))
                        referenced++;
        }
 
@@ -986,21 +987,21 @@ static int check_pmd_still_valid(struct mm_struct *mm,
  */
 static int __collapse_huge_page_swapin(struct mm_struct *mm,
                                       struct vm_area_struct *vma,
-                                      unsigned long haddr, pmd_t *pmd,
+                                      unsigned long start_addr, pmd_t *pmd,
                                       int referenced)
 {
        int swapped_in = 0;
        vm_fault_t ret = 0;
-       unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+       unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
        int result;
        pte_t *pte = NULL;
        spinlock_t *ptl;
 
-       for (address = haddr; address < end; address += PAGE_SIZE) {
+       for (addr = start_addr; addr < end; addr += PAGE_SIZE) {
                struct vm_fault vmf = {
                        .vma = vma,
-                       .address = address,
-                       .pgoff = linear_page_index(vma, address),
+                       .address = addr,
+                       .pgoff = linear_page_index(vma, addr),
                        .flags = FAULT_FLAG_ALLOW_RETRY,
                        .pmd = pmd,
                };
@@ -1010,7 +1011,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
                         * Here the ptl is only used to check pte_same() in
                         * do_swap_page(), so readonly version is enough.
                         */
-                       pte = pte_offset_map_ro_nolock(mm, pmd, address, &ptl);
+                       pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
                        if (!pte) {
                                mmap_read_unlock(mm);
                                result = SCAN_PMD_NULL;
@@ -1253,7 +1254,7 @@ out_nolock:
 
 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
-                                  unsigned long address, bool *mmap_locked,
+                                  unsigned long start_addr, bool *mmap_locked,
                                   struct collapse_control *cc)
 {
        pmd_t *pmd;
@@ -1262,26 +1263,26 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
        int none_or_zero = 0, shared = 0;
        struct page *page = NULL;
        struct folio *folio = NULL;
-       unsigned long _address;
+       unsigned long addr;
        spinlock_t *ptl;
        int node = NUMA_NO_NODE, unmapped = 0;
 
-       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+       VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
 
-       result = find_pmd_or_thp_or_none(mm, address, &pmd);
+       result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
        if (result != SCAN_SUCCEED)
                goto out;
 
        memset(cc->node_load, 0, sizeof(cc->node_load));
        nodes_clear(cc->alloc_nmask);
-       pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+       pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
        if (!pte) {
                result = SCAN_PMD_NULL;
                goto out;
        }
 
-       for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
-            _pte++, _address += PAGE_SIZE) {
+       for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
+            _pte++, addr += PAGE_SIZE) {
                pte_t pteval = ptep_get(_pte);
                if (is_swap_pte(pteval)) {
                        ++unmapped;
@@ -1329,7 +1330,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                        goto out_unmap;
                }
 
-               page = vm_normal_page(vma, _address, pteval);
+               page = vm_normal_page(vma, addr, pteval);
                if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
                        result = SCAN_PAGE_NULL;
                        goto out_unmap;
@@ -1398,7 +1399,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                if (cc->is_khugepaged &&
                    (pte_young(pteval) || folio_test_young(folio) ||
                     folio_test_referenced(folio) ||
-                    mmu_notifier_test_young(vma->vm_mm, _address)))
+                    mmu_notifier_test_young(vma->vm_mm, addr)))
                        referenced++;
        }
        if (cc->is_khugepaged &&
@@ -1411,7 +1412,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (result == SCAN_SUCCEED) {
-               result = collapse_huge_page(mm, address, referenced,
+               result = collapse_huge_page(mm, start_addr, referenced,
                                            unmapped, cc);
                /* collapse_huge_page will return with the mmap_lock released */
                *mmap_locked = false;