From: Nico Pache Date: Wed, 25 Mar 2026 11:40:19 +0000 (-0600) Subject: mm: introduce is_pmd_order helper X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b90c453d2664ba445383956560581f9db708584f;p=thirdparty%2Fkernel%2Flinux.git mm: introduce is_pmd_order helper In order to add mTHP support to khugepaged, we will often be checking if a given order is (or is not) a PMD order. Some places in the kernel already use this check, so let's create a simple helper function to keep the code clean and readable. Link: https://lkml.kernel.org/r/20260325114022.444081-3-npache@redhat.com Signed-off-by: Nico Pache Acked-by: David Hildenbrand (Arm) Reviewed-by: Baolin Wang Reviewed-by: Dev Jain Reviewed-by: Wei Yang Reviewed-by: Lance Yang Reviewed-by: Barry Song Reviewed-by: Zi Yan Reviewed-by: Pedro Falcato Reviewed-by: Lorenzo Stoakes Suggested-by: Lorenzo Stoakes Cc: Alistair Popple Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Brendan Jackman Cc: Byungchul Park Cc: Catalin Marinas Cc: David Rientjes Cc: Gregory Price Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Jan Kara Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Joshua Hahn Cc: Kefeng Wang Cc: Liam Howlett Cc: Lorenzo Stoakes (Oracle) Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Matthew Brost Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Mike Rapoport Cc: Nanyong Sun Cc: Peter Xu Cc: Rafael Aquini Cc: Rakie Kim Cc: Randy Dunlap Cc: Ryan Roberts Cc: Shivank Garg Cc: Steven Rostedt Cc: Suren Baghdasaryan Cc: Takashi Iwai (SUSE) Cc: Thomas Hellström Cc: Usama Arif Cc: Vishal Moola (Oracle) Cc: Vlastimil Babka Cc: Will Deacon Cc: Yang Shi Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index a4d9f964dfdea..bd7f0e1d80945 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -771,6 +771,11 @@ static inline bool pmd_is_huge(pmd_t pmd) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline bool 
is_pmd_order(unsigned int order) +{ + return order == HPAGE_PMD_ORDER; +} + static inline int split_folio_to_list_to_order(struct folio *folio, struct list_head *list, int new_order) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9fea52ccad561..1c1a7cf7b209b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -4159,7 +4159,7 @@ out_unlock: i_mmap_unlock_read(mapping); out: xas_destroy(&xas); - if (old_order == HPAGE_PMD_ORDER) + if (is_pmd_order(old_order)) count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED); return ret; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index f972a9a65e3aa..c6a5d9d1f252d 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1540,7 +1540,7 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign if (IS_ERR(folio)) return SCAN_PAGE_NULL; - if (folio_order(folio) != HPAGE_PMD_ORDER) { + if (!is_pmd_order(folio_order(folio))) { result = SCAN_PAGE_COMPOUND; goto drop_folio; } @@ -2023,7 +2023,7 @@ static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr, * we locked the first folio, then a THP might be there already. * This will be discovered on the first iteration. 
*/ - if (folio_order(folio) == HPAGE_PMD_ORDER) { + if (is_pmd_order(folio_order(folio))) { result = SCAN_PTE_MAPPED_HUGEPAGE; goto out_unlock; } @@ -2351,7 +2351,7 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, continue; } - if (folio_order(folio) == HPAGE_PMD_ORDER) { + if (is_pmd_order(folio_order(folio))) { result = SCAN_PTE_MAPPED_HUGEPAGE; /* * PMD-sized THP implies that we can only try diff --git a/mm/memory.c b/mm/memory.c index 7c350a38fecfc..6d54e5ec82f2b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5435,7 +5435,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return ret; - if (folio_order(folio) != HPAGE_PMD_ORDER) + if (!is_pmd_order(folio_order(folio))) return ret; page = &folio->page; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0e5175f1c767d..e5528c35bbb8a 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2449,7 +2449,7 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && /* filter "hugepage" allocation, unless from alloc_pages() */ - order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) { + is_pmd_order(order) && ilx != NO_INTERLEAVE_INDEX) { /* * For hugepage allocation and non-interleave policy which * allows the current node (or other explicitly preferred diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 937e9b8507091..cdde59e56a55a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -651,7 +651,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order) #ifdef CONFIG_TRANSPARENT_HUGEPAGE bool movable; if (order > PAGE_ALLOC_COSTLY_ORDER) { - VM_BUG_ON(order != HPAGE_PMD_ORDER); + VM_BUG_ON(!is_pmd_order(order)); movable = migratetype == MIGRATE_MOVABLE; @@ -683,7 +683,7 @@ static inline bool pcp_allowed_order(unsigned int order) if (order <= PAGE_ALLOC_COSTLY_ORDER) return true; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (order == 
HPAGE_PMD_ORDER) + if (is_pmd_order(order)) return true; #endif return false; diff --git a/mm/shmem.c b/mm/shmem.c index 5e7dcf5bc5d3c..6fa1e8340c93f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -5558,8 +5558,7 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj, spin_unlock(&huge_shmem_orders_lock); } else if (sysfs_streq(buf, "inherit")) { /* Do not override huge allocation policy with non-PMD sized mTHP */ - if (shmem_huge == SHMEM_HUGE_FORCE && - order != HPAGE_PMD_ORDER) + if (shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order)) return -EINVAL; spin_lock(&huge_shmem_orders_lock);