slab: Remove folio references from kvfree_rcu_cb()
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Thu, 13 Nov 2025 00:09:21 +0000 (00:09 +0000)
committer  Vlastimil Babka <vbabka@suse.cz>
           Thu, 13 Nov 2025 10:01:08 +0000 (11:01 +0100)
Remove conversions from folio to page and folio to slab.  This is
preparation for struct slab being allocated separately from struct page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20251113000932.1589073-8-willy@infradead.org
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
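
An illustrative sketch, not part of the patch: with page_slab() returning
NULL for pages that do not back a slab, the folio test-and-convert pair
collapses into a single lookup.  The function names below are the ones
appearing in the diff; the surrounding scaffolding is assumed for
illustration only.

	/* Old lookup: route everything through struct folio. */
	struct folio *folio = virt_to_folio(obj);
	if (!folio_test_slab(folio)) {
		/* Large kmalloc allocation backed by plain pages, not a slab. */
		free_large_kmalloc(&folio->page, obj);
		return;
	}
	slab = folio_slab(folio);

	/* New lookup: page_slab() already distinguishes slab from non-slab
	 * pages, so no folio conversion is needed at all. */
	struct page *page = virt_to_page(obj);
	slab = page_slab(page);
	if (!slab) {
		/* Large kmalloc allocation backed by plain pages, not a slab. */
		free_large_kmalloc(page, obj);
		return;
	}

The slab's cache and base address are then taken from the slab itself
(slab->slab_cache, slab_address(slab)) rather than from the folio, as the
hunks below show.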
mm/slub.c

index 79b71ee47f6395b65fd33bb4e0d3dac62c37f65f..56c7ddff43fae26313434da38269625ba8e1991f 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6771,7 +6771,7 @@ static void free_large_kmalloc(struct page *page, void *object)
 void kvfree_rcu_cb(struct rcu_head *head)
 {
        void *obj = head;
-       struct folio *folio;
+       struct page *page;
        struct slab *slab;
        struct kmem_cache *s;
        void *slab_addr;
@@ -6782,20 +6782,20 @@ void kvfree_rcu_cb(struct rcu_head *head)
                return;
        }
 
-       folio = virt_to_folio(obj);
-       if (!folio_test_slab(folio)) {
+       page = virt_to_page(obj);
+       slab = page_slab(page);
+       if (!slab) {
                /*
                 * rcu_head offset can be only less than page size so no need to
-                * consider folio order
+                * consider allocation order
                 */
                obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-               free_large_kmalloc(&folio->page, obj);
+               free_large_kmalloc(page, obj);
                return;
        }
 
-       slab = folio_slab(folio);
        s = slab->slab_cache;
-       slab_addr = folio_address(folio);
+       slab_addr = slab_address(slab);
 
        if (is_kfence_address(obj)) {
                obj = kfence_object_start(obj);