git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/vmscan: fix unintended mtc->nmask mutation in alloc_demote_folio()
author: Bing Jiao <bingjiao@google.com>
Tue, 3 Mar 2026 05:25:17 +0000 (05:25 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:18 +0000 (13:53 -0700)
In alloc_demote_folio(), mtc->nmask is set to NULL for the first
allocation.  If that succeeds, it returns without restoring mtc->nmask to
allowed_mask.  For subsequent allocations from the migrate_pages() batch,
mtc->nmask will be NULL.  If the target node then becomes full, the
fallback allocation will use nmask = NULL, allocating from any node
allowed by the task cpuset, which for kswapd is all nodes.

To address this issue, use a local copy of the mtc structure with nmask =
NULL for the first allocation attempt specifically, ensuring the original
mtc remains unmodified.

Link: https://lkml.kernel.org/r/20260303052519.109244-1-bingjiao@google.com
Fixes: 320080272892 ("mm/demotion: demote pages according to allocation fallback order")
Signed-off-by: Bing Jiao <bingjiao@google.com>
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 3a4a0a81c8719c89b814b7e9bc00c2f9da75a047..641a6063f3758431ef5180043307e07e61975da6 100644 (file)
@@ -985,13 +985,11 @@ static void folio_check_dirty_writeback(struct folio *folio,
 static struct folio *alloc_demote_folio(struct folio *src,
                unsigned long private)
 {
+       struct migration_target_control *mtc, target_nid_mtc;
        struct folio *dst;
-       nodemask_t *allowed_mask;
-       struct migration_target_control *mtc;
 
        mtc = (struct migration_target_control *)private;
 
-       allowed_mask = mtc->nmask;
        /*
         * make sure we allocate from the target node first also trying to
         * demote or reclaim pages from the target node via kswapd if we are
@@ -1001,15 +999,13 @@ static struct folio *alloc_demote_folio(struct folio *src,
         * a demotion of cold pages from the target memtier. This can result
         * in the kernel placing hot pages in slower(lower) memory tiers.
         */
-       mtc->nmask = NULL;
-       mtc->gfp_mask |= __GFP_THISNODE;
-       dst = alloc_migration_target(src, (unsigned long)mtc);
+       target_nid_mtc = *mtc;
+       target_nid_mtc.nmask = NULL;
+       target_nid_mtc.gfp_mask |= __GFP_THISNODE;
+       dst = alloc_migration_target(src, (unsigned long)&target_nid_mtc);
        if (dst)
                return dst;
 
-       mtc->gfp_mask &= ~__GFP_THISNODE;
-       mtc->nmask = allowed_mask;
-
        return alloc_migration_target(src, (unsigned long)mtc);
 }