slab: add sheaves to most caches
author    Vlastimil Babka <vbabka@suse.cz>
          Fri, 23 Jan 2026 06:52:44 +0000 (07:52 +0100)
committer Vlastimil Babka <vbabka@suse.cz>
          Thu, 29 Jan 2026 08:17:28 +0000 (09:17 +0100)
As a first step towards replacing cpu (partial) slabs with sheaves, enable
sheaves for almost all caches. Treat args->sheaf_capacity as a minimum, and
calculate the sheaf capacity with a formula that roughly follows the one used
for the number of objects in cpu partial slabs in set_cpu_partial().
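
To make the resulting numbers concrete, here is a minimal userspace sketch of
the calculation this patch adds; the 32-byte sheaf header and the simple
power-of-two rounding are assumptions made for the example only, not the
kernel's actual struct slab_sheaf layout or kmalloc_size_roundup() behaviour:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096
/* Hypothetical size of the slab_sheaf header; the real value differs. */
#define SHEAF_HEADER_SIZE 32

/* Simplified stand-in for kmalloc_size_roundup(): next power of two >= 32. */
static size_t kmalloc_bucket(size_t size)
{
	size_t bucket = 32;

	while (bucket < size)
		bucket *= 2;
	return bucket;
}

static unsigned int sheaf_capacity(unsigned int object_size, unsigned int requested)
{
	unsigned int capacity;
	size_t size;

	/* Same size tiers as the patch: roughly half of the cpu partial targets. */
	if (object_size >= PAGE_SIZE)
		capacity = 4;
	else if (object_size >= 1024)
		capacity = 12;
	else if (object_size >= 256)
		capacity = 26;
	else
		capacity = 60;

	/* Grow capacity so the sheaf fills its kmalloc bucket exactly. */
	size = kmalloc_bucket(SHEAF_HEADER_SIZE + capacity * sizeof(void *));
	capacity = (size - SHEAF_HEADER_SIZE) / sizeof(void *);

	/* An explicit args->sheaf_capacity acts as a minimum. */
	return capacity > requested ? capacity : requested;
}

int main(void)
{
	printf("size  192: capacity %u\n", sheaf_capacity(192, 0));
	printf("size  320: capacity %u\n", sheaf_capacity(320, 0));
	printf("size 2048: capacity %u\n", sheaf_capacity(2048, 0));
	return 0;
}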

This should result in roughly the same contention on the barn spin lock as
there currently is on the node list_lock without sheaves, so that benchmarking
results stay comparable. It can be tuned further later.

Don't enable sheaves for bootstrap caches as that wouldn't work. In
order to recognize them by SLAB_NO_OBJ_EXT, make sure the flag exists
even for !CONFIG_SLAB_OBJ_EXT.

This limitation will be lifted for kmalloc caches after the necessary
bootstrapping changes.
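
A tiny sketch of why the flag now needs a real bit even for
!CONFIG_SLAB_OBJ_EXT (the bit position below is made up for illustration and
is not the kernel's definition): when SLAB_NO_OBJ_EXT evaluated to 0, testing
s->flags could never single out the bootstrap caches.

#include <stdio.h>

#define SLAB_NO_OBJ_EXT_UNUSED	0u		/* old !CONFIG_SLAB_OBJ_EXT definition */
#define SLAB_NO_OBJ_EXT_BIT	(1u << 5)	/* a real, dedicated flag bit (made up) */

/* Mirrors the flags test that calculate_sheaf_capacity() relies on. */
static int is_bootstrap(unsigned int flags, unsigned int no_obj_ext)
{
	return (flags & no_obj_ext) != 0;
}

int main(void)
{
	/* A bootstrap cache carries the flag it was created with. */
	printf("flag is 0:     detected %d\n",
	       is_bootstrap(SLAB_NO_OBJ_EXT_UNUSED, SLAB_NO_OBJ_EXT_UNUSED));
	printf("flag is a bit: detected %d\n",
	       is_bootstrap(SLAB_NO_OBJ_EXT_BIT, SLAB_NO_OBJ_EXT_BIT));
	return 0;
}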

Also do not enable sheaves for SLAB_NOLEAKTRACE caches to avoid
recursion with kmemleak tracking (thanks to Breno Leitao).

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Tested-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Tested-by: Zhao Liu <zhao1.liu@intel.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/slab.h
mm/slub.c

index 2482992248dc9c48b6d25d2aff8bf196fa3d0e1e..2682ee57ec909927d4bfbfb962d0f85a56f6d916 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -57,9 +57,7 @@ enum _slab_flag_bits {
 #endif
        _SLAB_OBJECT_POISON,
        _SLAB_CMPXCHG_DOUBLE,
-#ifdef CONFIG_SLAB_OBJ_EXT
        _SLAB_NO_OBJ_EXT,
-#endif
        _SLAB_FLAGS_LAST_BIT
 };
 
@@ -238,11 +236,7 @@ enum _slab_flag_bits {
 #define SLAB_TEMPORARY         SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
 
 /* Slab created using create_boot_cache */
-#ifdef CONFIG_SLAB_OBJ_EXT
 #define SLAB_NO_OBJ_EXT                __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
-#else
-#define SLAB_NO_OBJ_EXT                __SLAB_FLAG_UNUSED
-#endif
 
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
index e90f3e558ae7d291990f836f6a344200c11dcd8d..b6c3071147561ad22c605cecc742d2c490ccbaad 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -7893,6 +7893,53 @@ static void set_cpu_partial(struct kmem_cache *s)
 #endif
 }
 
+static unsigned int calculate_sheaf_capacity(struct kmem_cache *s,
+                                            struct kmem_cache_args *args)
+{
+       unsigned int capacity;
+       size_t size;
+
+       if (IS_ENABLED(CONFIG_SLUB_TINY) || s->flags & SLAB_DEBUG_FLAGS)
+               return 0;
+
+       /*
+        * Bootstrap caches can't have sheaves for now (SLAB_NO_OBJ_EXT).
+        * SLAB_NOLEAKTRACE caches (e.g., kmemleak's object_cache) must not
+        * have sheaves to avoid recursion when sheaf allocation triggers
+        * kmemleak tracking.
+        */
+       if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
+               return 0;
+
+       /*
+        * For now, use a formula roughly similar to the one used for percpu
+        * partial slabs (divided by two, as there are two percpu sheaves),
+        * which should result in similar lock contention (barn or list_lock).
+        */
+       if (s->size >= PAGE_SIZE)
+               capacity = 4;
+       else if (s->size >= 1024)
+               capacity = 12;
+       else if (s->size >= 256)
+               capacity = 26;
+       else
+               capacity = 60;
+
+       /* Grow capacity so that the sheaf exactly fills a kmalloc size bucket */
+       size = struct_size_t(struct slab_sheaf, objects, capacity);
+       size = kmalloc_size_roundup(size);
+       capacity = (size - struct_size_t(struct slab_sheaf, objects, 0)) / sizeof(void *);
+
+       /*
+        * Respect an explicitly requested capacity. It is typically motivated
+        * by the expected maximum size of kmem_cache_prefill_sheaf() requests,
+        * so that those do not end up using low-performance oversize sheaves.
+        */
+       return max(capacity, args->sheaf_capacity);
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -8027,6 +8074,10 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                s->allocflags |= __GFP_RECLAIMABLE;
 
+       /* kmalloc caches need extra care to support sheaves */
+       if (!is_kmalloc_cache(s))
+               s->sheaf_capacity = calculate_sheaf_capacity(s, args);
+
        /*
         * Determine the number of objects per slab
         */
@@ -8631,15 +8682,12 @@ int do_kmem_cache_create(struct kmem_cache *s, const char *name,
 
        set_cpu_partial(s);
 
-       if (args->sheaf_capacity && !IS_ENABLED(CONFIG_SLUB_TINY)
-                                       && !(s->flags & SLAB_DEBUG_FLAGS)) {
+       if (s->sheaf_capacity) {
                s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves);
                if (!s->cpu_sheaves) {
                        err = -ENOMEM;
                        goto out;
                }
-               // TODO: increase capacity to grow slab_sheaf up to next kmalloc size?
-               s->sheaf_capacity = args->sheaf_capacity;
        }
 
 #ifdef CONFIG_NUMA
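
For reference, a sketch of how a cache creator interacts with the changed
semantics; the struct foo cache below is hypothetical, and it assumes the
kmem_cache_args-based kmem_cache_create() variant with the sheaf_capacity
field from this series:

#include <linux/slab.h>
#include <linux/init.h>

/* A hypothetical cache user. */
struct foo {
	unsigned long data[8];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	struct kmem_cache_args args = {
		/*
		 * After this patch, treated as a minimum: the cache gets at
		 * least 32 objects per sheaf, possibly more once the sheaf is
		 * rounded up to fill its kmalloc size bucket.
		 */
		.sheaf_capacity = 32,
	};

	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), &args, 0);

	return foo_cachep ? 0 : -ENOMEM;
}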