From: Vlastimil Babka Date: Sun, 8 Feb 2026 18:17:42 +0000 (+0100) Subject: Merge branch 'slab/for-7.0/sheaves' into slab/for-next X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=815c8e35511d0b9a214e9f644983fe477af9d5cb;p=thirdparty%2Fkernel%2Flinux.git Merge branch 'slab/for-7.0/sheaves' into slab/for-next Merge series "slab: replace cpu (partial) slabs with sheaves". The percpu sheaves caching layer was introduced as opt-in but the goal was to eventually move all caches to them. This is the next step, enabling sheaves for all caches (except the two bootstrap ones) and then removing the per cpu (partial) slabs and lots of associated code. Besides the lower locking overhead and a much more likely fastpath when freeing, this removes the rather complicated code related to the cpu slab lockless fastpaths (using this_cpu_try_cmpxchg128/64) and all its complications for PREEMPT_RT or kmalloc_nolock(). The lockless slab freelist+counters update operation using try_cmpxchg128/64 remains and is crucial for freeing remote NUMA objects and to allow flushing objects from sheaves to slabs mostly without the node list_lock. 
Link: https://lore.kernel.org/all/20260123-sheaves-for-all-v4-0-041323d506f7@suse.cz/ --- 815c8e35511d0b9a214e9f644983fe477af9d5cb diff --cc include/linux/slab.h index 34db237319c1b,2682ee57ec909..a0081642606b5 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@@ -57,12 -57,7 +57,10 @@@ enum _slab_flag_bits #endif _SLAB_OBJECT_POISON, _SLAB_CMPXCHG_DOUBLE, - #ifdef CONFIG_SLAB_OBJ_EXT _SLAB_NO_OBJ_EXT, - #endif +#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT) + _SLAB_OBJ_EXT_IN_OBJ, +#endif _SLAB_FLAGS_LAST_BIT }; @@@ -241,18 -236,8 +239,14 @@@ #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ /* Slab created using create_boot_cache */ - #ifdef CONFIG_SLAB_OBJ_EXT #define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT) - #else - #define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED - #endif +#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT) +#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_BIT(_SLAB_OBJ_EXT_IN_OBJ) +#else +#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_UNUSED +#endif + /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. *