git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: introduce is_pmd_order helper
author Nico Pache <npache@redhat.com>
Wed, 25 Mar 2026 11:40:19 +0000 (05:40 -0600)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:29 +0000 (13:53 -0700)
In order to add mTHP support to khugepaged, we will often be checking if a
given order is (or is not) a PMD order.  Some places in the kernel already
use this check, so let's create a simple helper function to keep the code
clean and readable.

Link: https://lkml.kernel.org/r/20260325114022.444081-3-npache@redhat.com
Signed-off-by: Nico Pache <npache@redhat.com>
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Reviewed-by: Barry Song <baohua@kernel.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Pedro Falcato <pfalcato@suse.de>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Suggested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rafael Aquini <raquini@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shivank Garg <shivankg@amd.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Takashi Iwai (SUSE) <tiwai@suse.de>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/khugepaged.c
mm/memory.c
mm/mempolicy.c
mm/page_alloc.c
mm/shmem.c

index a4d9f964dfdea981c86591f1d4f5d08bd419c1b5..bd7f0e1d80945f092f1c048a0c36c7e526099398 100644 (file)
@@ -771,6 +771,11 @@ static inline bool pmd_is_huge(pmd_t pmd)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static inline bool is_pmd_order(unsigned int order)
+{
+       return order == HPAGE_PMD_ORDER;
+}
+
 static inline int split_folio_to_list_to_order(struct folio *folio,
                struct list_head *list, int new_order)
 {
index 9fea52ccad56165dc3e41ee6f4da6e4d2a115f88..1c1a7cf7b209bd7f0511a56c9f40696175a0d1dc 100644 (file)
@@ -4159,7 +4159,7 @@ out_unlock:
                i_mmap_unlock_read(mapping);
 out:
        xas_destroy(&xas);
-       if (old_order == HPAGE_PMD_ORDER)
+       if (is_pmd_order(old_order))
                count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
        count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
        return ret;
index f972a9a65e3aa3a805de7373080db12d32dbd5c2..c6a5d9d1f252d9a864411e8e4d65d8b3877826c8 100644 (file)
@@ -1540,7 +1540,7 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign
        if (IS_ERR(folio))
                return SCAN_PAGE_NULL;
 
-       if (folio_order(folio) != HPAGE_PMD_ORDER) {
+       if (!is_pmd_order(folio_order(folio))) {
                result = SCAN_PAGE_COMPOUND;
                goto drop_folio;
        }
@@ -2023,7 +2023,7 @@ static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
                 * we locked the first folio, then a THP might be there already.
                 * This will be discovered on the first iteration.
                 */
-               if (folio_order(folio) == HPAGE_PMD_ORDER) {
+               if (is_pmd_order(folio_order(folio))) {
                        result = SCAN_PTE_MAPPED_HUGEPAGE;
                        goto out_unlock;
                }
@@ -2351,7 +2351,7 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm,
                        continue;
                }
 
-               if (folio_order(folio) == HPAGE_PMD_ORDER) {
+               if (is_pmd_order(folio_order(folio))) {
                        result = SCAN_PTE_MAPPED_HUGEPAGE;
                        /*
                         * PMD-sized THP implies that we can only try
index 7c350a38fecfc3b1c72c9f1c29d678dda2be11de..6d54e5ec82f2bdf45002e40e6bc20194a253f03d 100644 (file)
@@ -5435,7 +5435,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa
        if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
                return ret;
 
-       if (folio_order(folio) != HPAGE_PMD_ORDER)
+       if (!is_pmd_order(folio_order(folio)))
                return ret;
        page = &folio->page;
 
index 0e5175f1c767d81394276559b9610c24d854f5bc..e5528c35bbb8a4fa8964738e8b74b051bc044347 100644 (file)
@@ -2449,7 +2449,7 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            /* filter "hugepage" allocation, unless from alloc_pages() */
-           order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
+           is_pmd_order(order) && ilx != NO_INTERLEAVE_INDEX) {
                /*
                 * For hugepage allocation and non-interleave policy which
                 * allows the current node (or other explicitly preferred
index 937e9b8507091f8ce74a9568b08ebfa33373d337..cdde59e56a55a43bdb2984adcc4c6f4ad42aafd6 100644 (file)
@@ -651,7 +651,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        bool movable;
        if (order > PAGE_ALLOC_COSTLY_ORDER) {
-               VM_BUG_ON(order != HPAGE_PMD_ORDER);
+               VM_BUG_ON(!is_pmd_order(order));
 
                movable = migratetype == MIGRATE_MOVABLE;
 
@@ -683,7 +683,7 @@ static inline bool pcp_allowed_order(unsigned int order)
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
                return true;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (order == HPAGE_PMD_ORDER)
+       if (is_pmd_order(order))
                return true;
 #endif
        return false;
index 5e7dcf5bc5d3c9a758b067dac442b83d56f51840..6fa1e8340c93fbc72ab1946cae5b4c675bc01905 100644 (file)
@@ -5558,8 +5558,7 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
                spin_unlock(&huge_shmem_orders_lock);
        } else if (sysfs_streq(buf, "inherit")) {
                /* Do not override huge allocation policy with non-PMD sized mTHP */
-               if (shmem_huge == SHMEM_HUGE_FORCE &&
-                   order != HPAGE_PMD_ORDER)
+               if (shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
                        return -EINVAL;
 
                spin_lock(&huge_shmem_orders_lock);