mm/page_alloc: refactor the initial compaction handling
author Vlastimil Babka <vbabka@suse.cz>
Tue, 6 Jan 2026 11:52:37 +0000 (12:52 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 27 Jan 2026 04:02:23 +0000 (20:02 -0800)
The initial direct compaction done in some cases in
__alloc_pages_slowpath() stands out from the main retry loop of reclaim +
compaction.

We can simplify this by instead skipping the initial reclaim attempt via a
new local variable compact_first, and handling compact_priority as
necessary to match the original behavior.  No functional change intended.
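
To illustrate the resulting control flow in isolation, here is a minimal
standalone sketch in plain C; slowpath(), try_reclaim() and try_compact()
are hypothetical stand-ins, and only the compact_first / compact_priority
handling mirrors what the patch does:

    /*
     * Minimal sketch (not kernel code): models only the compact_first /
     * compact_priority handling described above.  try_reclaim() and
     * try_compact() are stand-ins for the real direct reclaim/compaction.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum prio { INIT_COMPACT, DEF_COMPACT };

    /* Pretend reclaim never helps and compaction succeeds on the 3rd try. */
    static bool try_reclaim(void) { return false; }
    static bool try_compact(enum prio p) { static int n; (void)p; return ++n > 2; }

    static bool slowpath(bool costly_order, bool noretry)
    {
            enum prio compact_priority = DEF_COMPACT;
            bool compact_first = false;

            /* Costly orders try (async) compaction before any reclaim. */
            if (costly_order) {
                    compact_first = true;
                    compact_priority = INIT_COMPACT;
            }
    retry:
            if (!compact_first && try_reclaim())
                    return true;

            if (try_compact(compact_priority))
                    return true;

            if (compact_first) {
                    /* Restore full priority for further retries, unless NORETRY. */
                    if (!noretry)
                            compact_priority = DEF_COMPACT;
                    compact_first = false;
                    goto retry;
            }

            if (noretry)
                    return false;
            goto retry;     /* real code also re-checks should_reclaim_retry() etc. */
    }

    int main(void)
    {
            printf("costly allocation %s\n",
                   slowpath(true, false) ? "succeeded" : "failed");
            return 0;
    }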

Link: https://lkml.kernel.org/r/20260106-thp-thisnode-tweak-v3-2-f5d67c21a193@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Joshua Hahn <joshua.hahnjy@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/gfp.h
mm/page_alloc.c

index b155929af5b1103aefaf186673da3599ee994199..f9fdc99ae594ad48579ea20a5265670d56abc0ef 100644 (file)
@@ -407,9 +407,15 @@ extern gfp_t gfp_allowed_mask;
 /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
 
+/* A helper for checking if gfp includes all the specified flags */
+static inline bool gfp_has_flags(gfp_t gfp, gfp_t flags)
+{
+       return (gfp & flags) == flags;
+}
+
 static inline bool gfp_has_io_fs(gfp_t gfp)
 {
-       return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+       return gfp_has_flags(gfp, __GFP_IO | __GFP_FS);
 }
 
 /*
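
For reference, a small userspace sketch of the new helper's semantics: it
returns true only when all of the requested flags are set.  The gfp_t
typedef and the flag values below are stand-ins for illustration, not taken
from the kernel headers:

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned int gfp_t;
    #define __GFP_IO        0x40u   /* stand-in value */
    #define __GFP_FS        0x80u   /* stand-in value */

    static inline bool gfp_has_flags(gfp_t gfp, gfp_t flags)
    {
            return (gfp & flags) == flags;
    }

    int main(void)
    {
            assert(gfp_has_flags(__GFP_IO | __GFP_FS, __GFP_IO | __GFP_FS));
            assert(!gfp_has_flags(__GFP_IO, __GFP_IO | __GFP_FS)); /* __GFP_FS missing */
            return 0;
    }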
index 8e6d2e61374ae1804738640be49adde2a8af5c42..848c5c93ccb5eb38072ab0988f198d78b2f3c728 100644 (file)
@@ -4694,7 +4694,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
 {
        bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
-       bool can_compact = gfp_compaction_allowed(gfp_mask);
+       bool can_compact = can_direct_reclaim && gfp_compaction_allowed(gfp_mask);
        bool nofail = gfp_mask & __GFP_NOFAIL;
        const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
        struct page *page = NULL;
@@ -4707,6 +4707,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        unsigned int cpuset_mems_cookie;
        unsigned int zonelist_iter_cookie;
        int reserve_flags;
+       bool compact_first = false;
 
        if (unlikely(nofail)) {
                /*
@@ -4730,6 +4731,19 @@ restart:
        cpuset_mems_cookie = read_mems_allowed_begin();
        zonelist_iter_cookie = zonelist_iter_begin();
 
+       /*
+        * For costly allocations, try direct compaction first, as it's likely
+        * that we have enough base pages and don't need to reclaim. For non-
+        * movable high-order allocations, do that as well, as compaction will
+        * try prevent permanent fragmentation by migrating from blocks of the
+        * same migratetype.
+        */
+       if (can_compact && (costly_order || (order > 0 &&
+                                       ac->migratetype != MIGRATE_MOVABLE))) {
+               compact_first = true;
+               compact_priority = INIT_COMPACT_PRIORITY;
+       }
+
        /*
         * The fast path uses conservative alloc_flags to succeed only until
         * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -4772,53 +4786,6 @@ restart:
        if (page)
                goto got_pg;
 
-       /*
-        * For costly allocations, try direct compaction first, as it's likely
-        * that we have enough base pages and don't need to reclaim. For non-
-        * movable high-order allocations, do that as well, as compaction will
-        * try prevent permanent fragmentation by migrating from blocks of the
-        * same migratetype.
-        * Don't try this for allocations that are allowed to ignore
-        * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
-        */
-       if (can_direct_reclaim && can_compact &&
-                       (costly_order ||
-                          (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
-                       && !gfp_pfmemalloc_allowed(gfp_mask)) {
-               page = __alloc_pages_direct_compact(gfp_mask, order,
-                                               alloc_flags, ac,
-                                               INIT_COMPACT_PRIORITY,
-                                               &compact_result);
-               if (page)
-                       goto got_pg;
-
-               /*
-                * Checks for costly allocations with __GFP_NORETRY, which
-                * includes some THP page fault allocations
-                */
-               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
-                       /*
-                        * THP page faults may attempt local node only first,
-                        * but are then allowed to only compact, not reclaim,
-                        * see alloc_pages_mpol().
-                        *
-                        * Compaction has failed above and we don't want such
-                        * THP allocations to put reclaim pressure on a single
-                        * node in a situation where other nodes might have
-                        * plenty of available memory.
-                        */
-                       if (gfp_mask & __GFP_THISNODE)
-                               goto nopage;
-
-                       /*
-                        * Proceed with single round of reclaim/compaction, but
-                        * since sync compaction could be very expensive, keep
-                        * using async compaction.
-                        */
-                       compact_priority = INIT_COMPACT_PRIORITY;
-               }
-       }
-
 retry:
        /*
         * Deal with possible cpuset update races or zonelist updates to avoid
@@ -4862,10 +4829,12 @@ retry:
                goto nopage;
 
        /* Try direct reclaim and then allocating */
-       page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
-                                                       &did_some_progress);
-       if (page)
-               goto got_pg;
+       if (!compact_first) {
+               page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags,
+                                                       ac, &did_some_progress);
+               if (page)
+                       goto got_pg;
+       }
 
        /* Try direct compaction and then allocating */
        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
@@ -4873,6 +4842,33 @@ retry:
        if (page)
                goto got_pg;
 
+       if (compact_first) {
+               /*
+                * THP page faults may attempt local node only first, but are
+                * then allowed to only compact, not reclaim, see
+                * alloc_pages_mpol().
+                *
+                * Compaction has failed above and we don't want such THP
+                * allocations to put reclaim pressure on a single node in a
+                * situation where other nodes might have plenty of available
+                * memory.
+                */
+               if (gfp_has_flags(gfp_mask, __GFP_NORETRY | __GFP_THISNODE))
+                       goto nopage;
+
+               /*
+                * For the initial compaction attempt we have lowered its
+                * priority. Restore it for further retries, if those are
+                * allowed. With __GFP_NORETRY there will be a single round of
+                * reclaim and compaction with the lowered priority.
+                */
+               if (!(gfp_mask & __GFP_NORETRY))
+                       compact_priority = DEF_COMPACT_PRIORITY;
+
+               compact_first = false;
+               goto retry;
+       }
+
        /* Do not loop if specifically requested */
        if (gfp_mask & __GFP_NORETRY)
                goto nopage;