fs/proc/task_mmu: refactor pagemap_pmd_range()
author     Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
           Mon, 10 Nov 2025 22:21:24 +0000 (22:21 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 24 Nov 2025 23:08:51 +0000 (15:08 -0800)
Separate out THP logic so we can drop an indentation level and reduce the
amount of noise in this function.

We add pagemap_pmd_range_thp() for this purpose.

While we're here, convert the VM_BUG_ON() to a VM_WARN_ON_ONCE().

No functional change intended.
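
For illustration only, a standalone userspace sketch of the general pattern
(hypothetical names, not the kernel code itself): the body that used to live
inside the locked branch becomes its own helper, so the caller is reduced to
taking the lock, delegating, and unlocking, and the helper sits one
indentation level shallower:

	#include <stdio.h>

	/* Helper holding what was previously the nested branch body. */
	static int handle_huge(int value)
	{
		if (value < 0)
			return -1;
		printf("huge entry: %d\n", value);
		return 0;
	}

	static int walk_entry(int value, int is_huge)
	{
		if (is_huge) {
			int err = handle_huge(value);

			/* The lock would be dropped here in the real function. */
			return err;
		}
		printf("regular entry: %d\n", value);
		return 0;
	}

	int main(void)
	{
		walk_entry(42, 1);
		return walk_entry(7, 0);
	}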

Link: https://lkml.kernel.org/r/f9ce7f3bb57e3627288225e23f2498cc5315f5ab.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index bf48fedaf128695cb8afb35d6c455a8b319588c5..8c35ea48a93e05e9d819b1f9daaec8cf513219a3 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1985,90 +1985,98 @@ out:
        return make_pme(frame, flags);
 }
 
-static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
-                            struct mm_walk *walk)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
+               unsigned long end, struct vm_area_struct *vma,
+               struct pagemapread *pm)
 {
-       struct vm_area_struct *vma = walk->vma;
-       struct pagemapread *pm = walk->private;
-       spinlock_t *ptl;
-       pte_t *pte, *orig_pte;
+       unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
+       u64 flags = 0, frame = 0;
+       pmd_t pmd = *pmdp;
+       struct page *page = NULL;
+       struct folio *folio = NULL;
        int err = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-       ptl = pmd_trans_huge_lock(pmdp, vma);
-       if (ptl) {
-               unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
-               u64 flags = 0, frame = 0;
-               pmd_t pmd = *pmdp;
-               struct page *page = NULL;
-               struct folio *folio = NULL;
+       if (vma->vm_flags & VM_SOFTDIRTY)
+               flags |= PM_SOFT_DIRTY;
 
-               if (vma->vm_flags & VM_SOFTDIRTY)
-                       flags |= PM_SOFT_DIRTY;
+       if (pmd_present(pmd)) {
+               page = pmd_page(pmd);
 
-               if (pmd_present(pmd)) {
-                       page = pmd_page(pmd);
+               flags |= PM_PRESENT;
+               if (pmd_soft_dirty(pmd))
+                       flags |= PM_SOFT_DIRTY;
+               if (pmd_uffd_wp(pmd))
+                       flags |= PM_UFFD_WP;
+               if (pm->show_pfn)
+                       frame = pmd_pfn(pmd) + idx;
+       } else if (thp_migration_supported() && is_swap_pmd(pmd)) {
+               swp_entry_t entry = pmd_to_swp_entry(pmd);
+               unsigned long offset;
 
-                       flags |= PM_PRESENT;
-                       if (pmd_soft_dirty(pmd))
-                               flags |= PM_SOFT_DIRTY;
-                       if (pmd_uffd_wp(pmd))
-                               flags |= PM_UFFD_WP;
-                       if (pm->show_pfn)
-                               frame = pmd_pfn(pmd) + idx;
-               }
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-               else if (is_swap_pmd(pmd)) {
-                       swp_entry_t entry = pmd_to_swp_entry(pmd);
-                       unsigned long offset;
-
-                       if (pm->show_pfn) {
-                               if (is_pfn_swap_entry(entry))
-                                       offset = swp_offset_pfn(entry) + idx;
-                               else
-                                       offset = swp_offset(entry) + idx;
-                               frame = swp_type(entry) |
-                                       (offset << MAX_SWAPFILES_SHIFT);
-                       }
-                       flags |= PM_SWAP;
-                       if (pmd_swp_soft_dirty(pmd))
-                               flags |= PM_SOFT_DIRTY;
-                       if (pmd_swp_uffd_wp(pmd))
-                               flags |= PM_UFFD_WP;
-                       VM_BUG_ON(!is_pmd_migration_entry(pmd));
-                       page = pfn_swap_entry_to_page(entry);
+               if (pm->show_pfn) {
+                       if (is_pfn_swap_entry(entry))
+                               offset = swp_offset_pfn(entry) + idx;
+                       else
+                               offset = swp_offset(entry) + idx;
+                       frame = swp_type(entry) |
+                               (offset << MAX_SWAPFILES_SHIFT);
                }
-#endif
+               flags |= PM_SWAP;
+               if (pmd_swp_soft_dirty(pmd))
+                       flags |= PM_SOFT_DIRTY;
+               if (pmd_swp_uffd_wp(pmd))
+                       flags |= PM_UFFD_WP;
+               VM_WARN_ON_ONCE(!is_pmd_migration_entry(pmd));
+               page = pfn_swap_entry_to_page(entry);
+       }
 
-               if (page) {
-                       folio = page_folio(page);
-                       if (!folio_test_anon(folio))
-                               flags |= PM_FILE;
-               }
+       if (page) {
+               folio = page_folio(page);
+               if (!folio_test_anon(folio))
+                       flags |= PM_FILE;
+       }
 
-               for (; addr != end; addr += PAGE_SIZE, idx++) {
-                       u64 cur_flags = flags;
-                       pagemap_entry_t pme;
+       for (; addr != end; addr += PAGE_SIZE, idx++) {
+               u64 cur_flags = flags;
+               pagemap_entry_t pme;
 
-                       if (folio && (flags & PM_PRESENT) &&
-                           __folio_page_mapped_exclusively(folio, page))
-                               cur_flags |= PM_MMAP_EXCLUSIVE;
+               if (folio && (flags & PM_PRESENT) &&
+                   __folio_page_mapped_exclusively(folio, page))
+                       cur_flags |= PM_MMAP_EXCLUSIVE;
 
-                       pme = make_pme(frame, cur_flags);
-                       err = add_to_pagemap(&pme, pm);
-                       if (err)
-                               break;
-                       if (pm->show_pfn) {
-                               if (flags & PM_PRESENT)
-                                       frame++;
-                               else if (flags & PM_SWAP)
-                                       frame += (1 << MAX_SWAPFILES_SHIFT);
-                       }
+               pme = make_pme(frame, cur_flags);
+               err = add_to_pagemap(&pme, pm);
+               if (err)
+                       break;
+               if (pm->show_pfn) {
+                       if (flags & PM_PRESENT)
+                               frame++;
+                       else if (flags & PM_SWAP)
+                               frame += (1 << MAX_SWAPFILES_SHIFT);
                }
+       }
+       return err;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+                            struct mm_walk *walk)
+{
+       struct vm_area_struct *vma = walk->vma;
+       struct pagemapread *pm = walk->private;
+       spinlock_t *ptl;
+       pte_t *pte, *orig_pte;
+       int err = 0;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       ptl = pmd_trans_huge_lock(pmdp, vma);
+       if (ptl) {
+               err = pagemap_pmd_range_thp(pmdp, addr, end, vma, pm);
                spin_unlock(ptl);
                return err;
        }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
 
        /*
         * We can assume that @vma always points to a valid one and @end never