slab: Remove folio references from __ksize()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 13 Nov 2025 00:09:16 +0000 (00:09 +0000)
committer Vlastimil Babka <vbabka@suse.cz>
Thu, 13 Nov 2025 10:01:08 +0000 (11:01 +0100)
In the future, we will separate slab, folio and page from each
other, and calling virt_to_folio() on an address allocated from
slab will return NULL.  Delay the conversion from struct page to
struct slab until we know we're not dealing with a large kmalloc
allocation.
There's a minor win for large kmalloc allocations as we avoid the
compound_head() hidden in virt_to_folio().
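
For context, virt_to_folio() is roughly the following (a sketch of
the current include/linux/mm.h helper; page_folio() is where the
compound_head() walk happens):

	static inline struct folio *virt_to_folio(const void *x)
	{
		struct page *page = virt_to_page(x);

		/* page_folio() does the compound_head() lookup */
		return page_folio(page);
	}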

This deprecates calling ksize() on memory allocated by alloc_pages().
Today it becomes a warning and support will be removed entirely in
the future.
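
A minimal sketch of the pattern this deprecates (hypothetical
driver code, not from this patch); after this change it trips the
WARN_ON(!slab) below and falls back to page_size():

	struct page *page = alloc_pages(GFP_KERNEL, 2);
	void *buf = page_address(page);

	/* Deprecated: buf did not come from kmalloc() */
	size_t sz = ksize(buf);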

Introduce large_kmalloc_size() to abstract how we represent the size
of a large kmalloc allocation.  For now, this is the same as
page_size(), but it will change with separately allocated memdescs.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20251113000932.1589073-3-willy@infradead.org
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/page-flags.h
mm/slab.h
mm/slab_common.c

index 6d5e44968eab0a80927f27e9819274f40a9aa1b1..f7a0e4af0c734404ae738670c3aa7d67dd3820c3 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1064,7 +1064,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
  * Serialized with zone lock.
  */
 PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
-FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
 
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
index a64b9b2c873160cb850efc657cd9be300198c0ea..31ccf0f6d3a163626ed68eabf9abb02a3de00ff1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -605,6 +605,16 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
        return s->size;
 }
 
+static inline unsigned int large_kmalloc_order(const struct page *page)
+{
+       return page[1].flags.f & 0xff;
+}
+
+static inline size_t large_kmalloc_size(const struct page *page)
+{
+       return PAGE_SIZE << large_kmalloc_order(page);
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 void dump_unreclaimable_slab(void);
 #else
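
A note on the encoding large_kmalloc_order() reads (an assumption
spelled out here, not stated in the patch): large kmalloc memory is
allocated as a compound page, so the order sits in the low byte of
the first tail page's flags word, where folio_order() also finds it
for a large folio.  In sketch form:

	/*
	 * Assumed encoding (sketch, not from the patch): the low byte
	 * of page[1].flags.f is the compound order, so today
	 *
	 *	PAGE_SIZE << (page[1].flags.f & 0xff) == page_size(page)
	 *
	 * which is why large_kmalloc_size() matches page_size() until
	 * memdescs are allocated separately.
	 */
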
index 932d13ada36c0d0c97931f5a2108566922f29236..67ad2328276eb324dbf144ed309169ce545df343 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -997,26 +997,27 @@ void __init create_kmalloc_caches(void)
  */
 size_t __ksize(const void *object)
 {
-       struct folio *folio;
+       const struct page *page;
+       const struct slab *slab;
 
        if (unlikely(object == ZERO_SIZE_PTR))
                return 0;
 
-       folio = virt_to_folio(object);
+       page = virt_to_page(object);
 
-       if (unlikely(!folio_test_slab(folio))) {
-               if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
-                       return 0;
-               if (WARN_ON(object != folio_address(folio)))
-                       return 0;
-               return folio_size(folio);
-       }
+       if (unlikely(PageLargeKmalloc(page)))
+               return large_kmalloc_size(page);
+
+       slab = page_slab(page);
+       /* Delete this after we're sure there are no users */
+       if (WARN_ON(!slab))
+               return page_size(page);
 
 #ifdef CONFIG_SLUB_DEBUG
-       skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+       skip_orig_size_check(slab->slab_cache, object);
 #endif
 
-       return slab_ksize(folio_slab(folio)->slab_cache);
+       return slab_ksize(slab->slab_cache);
 }
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
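
A usage sketch of the resulting behaviour (hypothetical sizes,
assuming a default SLUB configuration):

	void *small = kmalloc(100, GFP_KERNEL);
	void *large = kmalloc(SZ_1M, GFP_KERNEL);

	ksize(small);	/* slab path: slab_ksize(), e.g. 128 for kmalloc-128 */
	ksize(large);	/* PageLargeKmalloc path: large_kmalloc_size() == 1 MiB */

	kfree(large);
	kfree(small);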