mm/filemap: Add NUMA mempolicy support to filemap_alloc_folio()
author    Matthew Wilcox <willy@infradead.org>
          Wed, 27 Aug 2025 17:52:43 +0000 (17:52 +0000)
committer Sean Christopherson <seanjc@google.com>
          Mon, 20 Oct 2025 13:30:25 +0000 (06:30 -0700)
Add a mempolicy parameter to filemap_alloc_folio() to enable NUMA-aware
page cache allocations. This will be used by upcoming changes to
support NUMA policies in guest-memfd, where guest memory needs to be
allocated according to the NUMA policy specified by the VMM.

All existing users pass NULL, maintaining the current behavior.
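
As an illustration only, a NUMA-aware caller could then allocate page
cache memory along these lines (how the mempolicy is obtained, and the
variable names, are assumptions for this sketch, not part of this
patch):

	struct mempolicy *policy;	/* e.g. the policy specified by the VMM */
	struct folio *folio;

	/*
	 * A non-NULL policy makes filemap_alloc_folio() allocate
	 * according to that mempolicy (via folio_alloc_mpol_noprof(),
	 * preferring the local node); NULL keeps today's behavior.
	 */
	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0, policy);
	if (!folio)
		return -ENOMEM;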

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
Tested-by: Ashish Kalra <ashish.kalra@amd.com>
Link: https://lore.kernel.org/r/20250827175247.83322-4-shivankg@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
fs/btrfs/compression.c
fs/btrfs/verity.c
fs/erofs/zdata.c
fs/f2fs/compress.c
include/linux/pagemap.h
mm/filemap.c
mm/readahead.c

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index bacad18357b3386cb38b8c0b3ddebbb5baf76255..d927ae32e7d0b5c9ee0a089fe1d4caa32ebb0abb 100644
@@ -491,8 +491,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                        continue;
                }
 
-               folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
-                                                                  ~__GFP_FS), 0);
+               folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS),
+                                           0, NULL);
                if (!folio)
                        break;
 
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index 46bd8ca586708514d566060a3d0b42904db13036..d4523d5debcd487d81f179cd957361ae67dd3a45 100644
@@ -742,7 +742,7 @@ again:
        }
 
        folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
-                                   0);
+                                   0, NULL);
        if (!folio)
                return ERR_PTR(-ENOMEM);
 
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index bc80cfe482f73bdb4ac5a8810c39e95c5089f4ff..b7369fb4fbe9215655d318631aba88031cc719e7 100644
@@ -562,7 +562,7 @@ static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
                         * Allocate a managed folio for cached I/O, or it may be
                         * then filled with a file-backed folio for in-place I/O
                         */
-                       newfolio = filemap_alloc_folio(gfp, 0);
+                       newfolio = filemap_alloc_folio(gfp, 0, NULL);
                        if (!newfolio)
                                continue;
                        newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 6ad8d3bc6df7a8dcb304e8dd9795588d0802ec02..a65e8cd388bca32d7e7c647494ff9bd7b2b91a5a 100644
@@ -1947,7 +1947,7 @@ static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
                return;
        }
 
-       cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
+       cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0, NULL);
        if (!cfolio)
                return;
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09b581c1d878d3b59aa7018ca0c3e82b3c774f7c..f1d0610210f7e9c0a2b96e6b1d58687410544a16 100644
@@ -654,9 +654,11 @@ static inline void *detach_page_private(struct page *page)
 }
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+               struct mempolicy *policy);
 #else
-static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+               struct mempolicy *policy)
 {
        return folio_alloc_noprof(gfp, order);
 }
@@ -667,7 +669,7 @@ static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int o
 
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-       return &filemap_alloc_folio(gfp, 0)->page;
+       return &filemap_alloc_folio(gfp, 0, NULL)->page;
 }
 
 static inline gfp_t readahead_gfp_mask(struct address_space *x)
diff --git a/mm/filemap.c b/mm/filemap.c
index 13f0259d993c9c0431e51b22a42fe0f644a4546a..7b42fd6dcc9a495de57775692ad70917efe310ca 100644
@@ -1002,11 +1002,16 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 EXPORT_SYMBOL_GPL(filemap_add_folio);
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+               struct mempolicy *policy)
 {
        int n;
        struct folio *folio;
 
+       if (policy)
+               return folio_alloc_mpol_noprof(gfp, order, policy,
+                               NO_INTERLEAVE_INDEX, numa_node_id());
+
        if (cpuset_do_page_mem_spread()) {
                unsigned int cpuset_mems_cookie;
                do {
@@ -2009,7 +2014,7 @@ no_page:
                        err = -ENOMEM;
                        if (order > min_order)
                                alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
-                       folio = filemap_alloc_folio(alloc_gfp, order);
+                       folio = filemap_alloc_folio(alloc_gfp, order, NULL);
                        if (!folio)
                                continue;
 
@@ -2551,7 +2556,7 @@ static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch)
        if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
                return -EAGAIN;
 
-       folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
+       folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order, NULL);
        if (!folio)
                return -ENOMEM;
        if (iocb->ki_flags & IOCB_DONTCACHE)
@@ -3983,8 +3988,7 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
 repeat:
        folio = filemap_get_folio(mapping, index);
        if (IS_ERR(folio)) {
-               folio = filemap_alloc_folio(gfp,
-                                           mapping_min_folio_order(mapping));
+               folio = filemap_alloc_folio(gfp, mapping_min_folio_order(mapping), NULL);
                if (!folio)
                        return ERR_PTR(-ENOMEM);
                index = mapping_align_index(mapping, index);
diff --git a/mm/readahead.c b/mm/readahead.c
index 3a4b5d58eeb6fe266fbfc4dd1c6033e016915ead..b415c99691762f45fa0501106212f581ec94adee 100644
@@ -186,7 +186,7 @@ static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
 {
        struct folio *folio;
 
-       folio = filemap_alloc_folio(gfp_mask, order);
+       folio = filemap_alloc_folio(gfp_mask, order, NULL);
        if (folio && ractl->dropbehind)
                __folio_set_dropbehind(folio);