git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/filemap: Extend __filemap_get_folio() to support NUMA memory policies
Author: Matthew Wilcox <willy@infradead.org>
Wed, 27 Aug 2025 17:52:44 +0000 (17:52 +0000)
Committer: Sean Christopherson <seanjc@google.com>
Mon, 20 Oct 2025 13:30:25 +0000 (06:30 -0700)
Extend __filemap_get_folio() to support NUMA memory policies by
renaming the implementation to __filemap_get_folio_mpol() and adding
a mempolicy parameter. The original function becomes a static inline
wrapper that passes NULL for the mempolicy.

This infrastructure will enable future support for NUMA-aware page cache
allocations for KVM guests backed by guest_memfd memory.

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
Tested-by: Ashish Kalra <ashish.kalra@amd.com>
Link: https://lore.kernel.org/r/20250827175247.83322-5-shivankg@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
include/linux/pagemap.h
mm/filemap.c

index f1d0610210f7e9c0a2b96e6b1d58687410544a16..a17fabbc0269f2c60f50da2ae9ac6da4eb147df1 100644 (file)
@@ -755,11 +755,17 @@ static inline fgf_t fgf_set_order(size_t size)
 }
 
 void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-               fgf_t fgp_flags, gfp_t gfp);
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+               pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy);
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
                fgf_t fgp_flags, gfp_t gfp);
 
+static inline struct folio *__filemap_get_folio(struct address_space *mapping,
+               pgoff_t index, fgf_t fgf_flags, gfp_t gfp)
+{
+       return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL);
+}
+
 /**
  * write_begin_get_folio - Get folio for write_begin with flags.
  * @iocb: The kiocb passed from write_begin (may be NULL).
index 7b42fd6dcc9a495de57775692ad70917efe310ca..91c4537283d3a169fc763b406dd1376672a75007 100644 (file)
@@ -1928,11 +1928,12 @@ out:
 }
 
 /**
- * __filemap_get_folio - Find and get a reference to a folio.
+ * __filemap_get_folio_mpol - Find and get a reference to a folio.
  * @mapping: The address_space to search.
  * @index: The page index.
  * @fgp_flags: %FGP flags modify how the folio is returned.
  * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
+ * @policy: NUMA memory allocation policy to follow.
  *
  * Looks up the page cache entry at @mapping & @index.
  *
@@ -1943,8 +1944,8 @@ out:
  *
  * Return: The found folio or an ERR_PTR() otherwise.
  */
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-               fgf_t fgp_flags, gfp_t gfp)
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+               pgoff_t index, fgf_t fgp_flags, gfp_t gfp, struct mempolicy *policy)
 {
        struct folio *folio;
 
@@ -2014,7 +2015,7 @@ no_page:
                        err = -ENOMEM;
                        if (order > min_order)
                                alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
-                       folio = filemap_alloc_folio(alloc_gfp, order, NULL);
+                       folio = filemap_alloc_folio(alloc_gfp, order, policy);
                        if (!folio)
                                continue;
 
@@ -2061,7 +2062,7 @@ no_page:
                folio_clear_dropbehind(folio);
        return folio;
 }
-EXPORT_SYMBOL(__filemap_get_folio);
+EXPORT_SYMBOL(__filemap_get_folio_mpol);
 
 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
                xa_mark_t mark)