mm/mempolicy: add alloc_frozen_pages()
author:    Matthew Wilcox (Oracle) <willy@infradead.org>
           Mon, 25 Nov 2024 21:01:46 +0000 (21:01 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
           Tue, 14 Jan 2025 06:40:33 +0000 (22:40 -0800)
Provide an interface to allocate pages from the page allocator without
incrementing their refcount.  This saves an atomic operation on free,
which may be beneficial to some users (eg slab).

Link: https://lkml.kernel.org/r/20241125210149.2976098-15-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
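
[Editorial illustration, not part of the patch: a sketch of the caller-side pairing the interface is meant for. The interface is mm-internal (declared in mm/internal.h), the page comes back with a refcount of zero, and it is released with free_frozen_pages() rather than __free_pages() or put_page(). The helper names below are hypothetical.]

	/*
	 * Illustration only: an mm-internal user that manages the page's
	 * lifetime itself and therefore never needs the refcount.
	 */
	static void *frozen_buffer_alloc(gfp_t gfp, unsigned int order)
	{
		struct page *page = alloc_frozen_pages(gfp, order);

		if (!page)
			return NULL;
		return page_address(page);	/* refcount stays at 0 */
	}

	static void frozen_buffer_free(void *addr, unsigned int order)
	{
		free_frozen_pages(virt_to_page(addr), order);
	}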
mm/internal.h
mm/mempolicy.c

diff --git a/mm/internal.h b/mm/internal.h
index b831688a71e8165bd63a2b92193e78248dd0eadf..6f6585e98c6f9d6bc119e26a5624372f4f13a7b4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -747,6 +747,18 @@ struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
 void free_frozen_pages(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
 
+#ifdef CONFIG_NUMA
+struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
+#else
+static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
+{
+       return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
+}
+#endif
+
+#define alloc_frozen_pages(...) \
+       alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
+
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
 extern void zone_pcp_enable(struct zone *zone);
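
[Editorial note, not part of the patch: with CONFIG_NUMA the out-of-line, mempolicy-aware definition added below in mm/mempolicy.c is used; without it, the static inline above resolves directly to the local node, so a call such as

	struct page *page = alloc_frozen_pages(GFP_KERNEL, 0);

boils down to roughly

	struct page *page = __alloc_frozen_pages_noprof(GFP_KERNEL, 0,
							numa_node_id(), NULL);

with the alloc_hooks() wrapper additionally threading the call through memory allocation profiling.]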
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e092aff55e2d887ce52ab7369c8901c9e5878f4f..305aa30121733aeebe7309bd76f77723eb8879bc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2205,9 +2205,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
         */
        preferred_gfp = gfp | __GFP_NOWARN;
        preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-       page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
+       page = __alloc_frozen_pages_noprof(preferred_gfp, order, nid, nodemask);
        if (!page)
-               page = __alloc_pages_noprof(gfp, order, nid, NULL);
+               page = __alloc_frozen_pages_noprof(gfp, order, nid, NULL);
 
        return page;
 }
@@ -2253,8 +2253,9 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
                         * First, try to allocate THP only on local node, but
                         * don't reclaim unnecessarily, just compact.
                         */
-                       page = __alloc_pages_node_noprof(nid,
-                               gfp | __GFP_THISNODE | __GFP_NORETRY, order);
+                       page = __alloc_frozen_pages_noprof(
+                               gfp | __GFP_THISNODE | __GFP_NORETRY, order,
+                               nid, NULL);
                        if (page || !(gfp & __GFP_DIRECT_RECLAIM))
                                return page;
                        /*
@@ -2266,7 +2267,7 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
                }
        }
 
-       page = __alloc_pages_noprof(gfp, order, nid, nodemask);
+       page = __alloc_frozen_pages_noprof(gfp, order, nid, nodemask);
 
        if (unlikely(pol->mode == MPOL_INTERLEAVE ||
                     pol->mode == MPOL_WEIGHTED_INTERLEAVE) && page) {
@@ -2285,8 +2286,13 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
                struct mempolicy *pol, pgoff_t ilx, int nid)
 {
-       return page_rmappable_folio(alloc_pages_mpol(gfp | __GFP_COMP,
-                                                       order, pol, ilx, nid));
+       struct page *page = alloc_pages_mpol(gfp | __GFP_COMP, order, pol,
+                       ilx, nid);
+       if (!page)
+               return NULL;
+
+       set_page_refcounted(page);
+       return page_rmappable_folio(page);
 }
 
 /**
@@ -2321,6 +2327,21 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct
 }
 EXPORT_SYMBOL(vma_alloc_folio_noprof);
 
+struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned order)
+{
+       struct mempolicy *pol = &default_policy;
+
+       /*
+        * No reference counting needed for current->mempolicy
+        * nor system default_policy
+        */
+       if (!in_interrupt() && !(gfp & __GFP_THISNODE))
+               pol = get_task_policy(current);
+
+       return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX,
+                                      numa_node_id());
+}
+
 /**
  * alloc_pages - Allocate pages.
  * @gfp: GFP flags.
@@ -2337,17 +2358,11 @@ EXPORT_SYMBOL(vma_alloc_folio_noprof);
  */
 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
 {
-       struct mempolicy *pol = &default_policy;
-
-       /*
-        * No reference counting needed for current->mempolicy
-        * nor system default_policy
-        */
-       if (!in_interrupt() && !(gfp & __GFP_THISNODE))
-               pol = get_task_policy(current);
+       struct page *page = alloc_frozen_pages_noprof(gfp, order);
 
-       return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX,
-                                      numa_node_id());
+       if (page)
+               set_page_refcounted(page);
+       return page;
 }
 EXPORT_SYMBOL(alloc_pages_noprof);