mm/vmscan: make __node_reclaim() more generic
author Davidlohr Bueso <dave@stgolabs.net>
Mon, 23 Jun 2025 18:58:50 +0000 (11:58 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 20 Jul 2025 01:59:52 +0000 (18:59 -0700)
As this will be called from non-page-allocator paths for proactive
reclaim, allow callers to pass in the scan_control and the number of
pages to reclaim, and adjust the return value to be the number of pages
actually reclaimed.  No change in semantics.
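
For illustration, a minimal sketch (not part of this patch) of how a
proactive reclaim path could now drive __node_reclaim() directly with
its own scan_control and page target.  The field setup mirrors what
node_reclaim() does below, minus the allocation order, and the function
name proactive_node_reclaim() is hypothetical:

static unsigned long proactive_node_reclaim(struct pglist_data *pgdat,
					    unsigned long nr_pages)
{
	struct scan_control sc = {
		/* same knobs node_reclaim() sets up, without an order */
		.nr_to_reclaim	= max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask	= current_gfp_context(GFP_KERNEL),
		.priority	= NODE_RECLAIM_PRIORITY,
		.may_writepage	= 1,
		.may_unmap	= 1,
		.may_swap	= 1,
		.reclaim_idx	= gfp_zone(GFP_KERNEL),
	};

	/* returns pages reclaimed rather than a success/failure flag */
	return __node_reclaim(pgdat, GFP_KERNEL, nr_pages, &sc);
}

node_reclaim() itself keeps its old boolean semantics by comparing the
returned count against nr_pages, as the diff below shows.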

Link: https://lkml.kernel.org/r/20250623185851.830632-4-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 9702ee5aa65d5f11a1302688fa91b073a678060c..d165b66da796f30123b4a375068963f02aa82763 100644
@@ -7618,36 +7618,26 @@ static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
 /*
  * Try to free up some pages from this node through reclaim.
  */
-static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
+static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
+                                   unsigned long nr_pages,
+                                   struct scan_control *sc)
 {
-       /* Minimum pages needed in order to stay on node */
-       const unsigned long nr_pages = 1 << order;
        struct task_struct *p = current;
        unsigned int noreclaim_flag;
-       struct scan_control sc = {
-               .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
-               .gfp_mask = current_gfp_context(gfp_mask),
-               .order = order,
-               .priority = NODE_RECLAIM_PRIORITY,
-               .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
-               .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
-               .may_swap = 1,
-               .reclaim_idx = gfp_zone(gfp_mask),
-       };
        unsigned long pflags;
 
-       trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
-                                          sc.gfp_mask);
+       trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, sc->order,
+                                          sc->gfp_mask);
 
        cond_resched();
        psi_memstall_enter(&pflags);
        delayacct_freepages_start();
-       fs_reclaim_acquire(sc.gfp_mask);
+       fs_reclaim_acquire(sc->gfp_mask);
        /*
         * We need to be able to allocate from the reserves for RECLAIM_UNMAP
         */
        noreclaim_flag = memalloc_noreclaim_save();
-       set_task_reclaim_state(p, &sc.reclaim_state);
+       set_task_reclaim_state(p, &sc->reclaim_state);
 
        if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
            node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
@@ -7656,24 +7646,36 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
                 * priorities until we have enough memory freed.
                 */
                do {
-                       shrink_node(pgdat, &sc);
-               } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
+                       shrink_node(pgdat, sc);
+               } while (sc->nr_reclaimed < nr_pages && --sc->priority >= 0);
        }
 
        set_task_reclaim_state(p, NULL);
        memalloc_noreclaim_restore(noreclaim_flag);
-       fs_reclaim_release(sc.gfp_mask);
+       fs_reclaim_release(sc->gfp_mask);
        delayacct_freepages_end();
        psi_memstall_leave(&pflags);
 
-       trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
+       trace_mm_vmscan_node_reclaim_end(sc->nr_reclaimed);
 
-       return sc.nr_reclaimed >= nr_pages;
+       return sc->nr_reclaimed;
 }
 
 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 {
        int ret;
+       /* Minimum pages needed in order to stay on node */
+       const unsigned long nr_pages = 1 << order;
+       struct scan_control sc = {
+               .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
+               .gfp_mask = current_gfp_context(gfp_mask),
+               .order = order,
+               .priority = NODE_RECLAIM_PRIORITY,
+               .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
+               .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
+               .may_swap = 1,
+               .reclaim_idx = gfp_zone(gfp_mask),
+       };
 
        /*
         * Node reclaim reclaims unmapped file backed pages and
@@ -7708,7 +7710,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
        if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
                return NODE_RECLAIM_NOSCAN;
 
-       ret = __node_reclaim(pgdat, gfp_mask, order);
+       ret = __node_reclaim(pgdat, gfp_mask, nr_pages, &sc) >= nr_pages;
        clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
 
        if (ret)