git.ipfire.org Git - thirdparty/linux.git/commitdiff
slab: Remove folio references from free_large_kmalloc()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 13 Nov 2025 00:09:20 +0000 (00:09 +0000)
committer: Vlastimil Babka <vbabka@suse.cz>
Thu, 13 Nov 2025 10:01:08 +0000 (11:01 +0100)
There's no need to use folio APIs here; just use a page directly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251113000932.1589073-7-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index 458e58e374806eb99f9f5e89e01afad3806afde2..79b71ee47f6395b65fd33bb4e0d3dac62c37f65f 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6742,12 +6742,12 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-static void free_large_kmalloc(struct folio *folio, void *object)
+static void free_large_kmalloc(struct page *page, void *object)
 {
-       unsigned int order = folio_order(folio);
+       unsigned int order = compound_order(page);
 
-       if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
-               dump_page(&folio->page, "Not a kmalloc allocation");
+       if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
+               dump_page(page, "Not a kmalloc allocation");
                return;
        }
 
@@ -6758,10 +6758,10 @@ static void free_large_kmalloc(struct folio *folio, void *object)
        kasan_kfree_large(object);
        kmsan_kfree_large(object);
 
-       lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+       mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
                              -(PAGE_SIZE << order));
-       __folio_clear_large_kmalloc(folio);
-       free_frozen_pages(&folio->page, order);
+       __ClearPageLargeKmalloc(page);
+       free_frozen_pages(page, order);
 }
 
 /*
@@ -6789,7 +6789,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
                 * consider folio order
                 */
                obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-               free_large_kmalloc(folio, obj);
+               free_large_kmalloc(&folio->page, obj);
                return;
        }
 
@@ -6829,7 +6829,7 @@ void kfree(const void *object)
 
        folio = virt_to_folio(object);
        if (unlikely(!folio_test_slab(folio))) {
-               free_large_kmalloc(folio, (void *)object);
+               free_large_kmalloc(&folio->page, (void *)object);
                return;
        }
 
@@ -7253,7 +7253,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        if (!s) {
                /* Handle kalloc'ed objects */
                if (unlikely(!folio_test_slab(folio))) {
-                       free_large_kmalloc(folio, object);
+                       free_large_kmalloc(&folio->page, object);
                        df->slab = NULL;
                        return size;
                }