From 5660ee54e7982f9097ddc684e90f15bdcc7fef4b Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Mon, 2 Jun 2025 13:02:13 +0200
Subject: [PATCH] mm, slab: use frozen pages for large kmalloc

Since slab pages are now frozen, it makes sense to have large kmalloc()
objects behave the same as small kmalloc() objects, as the choice
between the two is an implementation detail that depends on the
allocation size. Notably, increasing the refcount on a slab page
containing a kmalloc() object is no longer possible, so the behavior
should be consistent for large kmalloc pages. Therefore, change large
kmalloc to use the frozen pages API.

Because of some unexpected fallout in the slab pages case (see commit
b9c0e49abfca ("mm: decline to manipulate the refcount on a slab page")),
implement the same kind of checks and warnings as part of this change.

Notably, networking code uses sendpage_ok() to determine whether a
page's refcount can be manipulated in the network stack, and it should
continue behaving correctly. Before this change, the function returned
true for large kmalloc pages, and the page refcount could be
manipulated. After this change, it returns false.

Acked-by: Roman Gushchin
Acked-by: Harry Yoo
Signed-off-by: Vlastimil Babka
---
 include/linux/mm.h | 4 +++-
 mm/slub.c          | 6 +++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0ef2ba0c667af..a35d5958603fc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1325,6 +1325,8 @@ static inline void get_page(struct page *page)
 	struct folio *folio = page_folio(page);
 
 	if (WARN_ON_ONCE(folio_test_slab(folio)))
 		return;
+	if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
+		return;
 	folio_get(folio);
 }
@@ -1419,7 +1421,7 @@ static inline void put_page(struct page *page)
 {
 	struct folio *folio = page_folio(page);
 
-	if (folio_test_slab(folio))
+	if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
 		return;
 
 	folio_put(folio);
diff --git a/mm/slub.c b/mm/slub.c
index 06d64a5fb1bf4..823042efbfc98 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4271,9 +4271,9 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
 	flags |= __GFP_COMP;
 
 	if (node == NUMA_NO_NODE)
-		folio = (struct folio *)alloc_pages_noprof(flags, order);
+		folio = (struct folio *)alloc_frozen_pages_noprof(flags, order);
 	else
-		folio = (struct folio *)__alloc_pages_noprof(flags, order, node, NULL);
+		folio = (struct folio *)__alloc_frozen_pages_noprof(flags, order, node, NULL);
 
 	if (folio) {
 		ptr = folio_address(folio);
@@ -4770,7 +4770,7 @@ static void free_large_kmalloc(struct folio *folio, void *object)
 	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
 			      -(PAGE_SIZE << order));
 	__folio_clear_large_kmalloc(folio);
-	folio_put(folio);
+	free_frozen_pages(&folio->page, order);
 }
 
 /*
-- 
2.47.2
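
A note on the sendpage_ok() interaction described above. The helper is
defined in include/linux/net.h; the sketch below is a paraphrase of
that definition at the time of this patch, so verify it against your
tree:

/*
 * A page may be handed to the network stack for zero-copy sending
 * only if it is not a slab page and carries a real reference.
 */
static inline bool sendpage_ok(struct page *page)
{
	return !PageSlab(page) && page_count(page) >= 1;
}

Frozen pages are allocated with a refcount of zero, so once large
kmalloc switches to the frozen pages API, a large kmalloc page fails
the page_count(page) >= 1 check and sendpage_ok() returns false, which
keeps the network stack from taking page references that the allocator
no longer expects.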