git.ipfire.org Git - thirdparty/linux.git/commitdiff
slab: separate struct freelist_tid from kmem_cache_cpu
author: Vlastimil Babka <vbabka@suse.cz>
Fri, 7 Nov 2025 13:51:23 +0000 (14:51 +0100)
committer: Vlastimil Babka <vbabka@suse.cz>
Mon, 10 Nov 2025 14:35:21 +0000 (15:35 +0100)
In kmem_cache_cpu we currently have a union of the freelist+tid pair
with freelist_aba_t, relying implicitly on the type compatibility with the
freelist+counters pair used in freelist_aba_t.

To allow further changes to freelist_aba_t, we can instead define a
separate struct freelist_tid (instead of a typedef, per the coding
style) for kmem_cache_cpu, as that affects only a single helper
__update_cpu_freelist_fast().

We can add the resulting struct freelist_tid to kmem_cache_cpu as an
unnamed field thanks to -fms-extensions, so that the freelist and tid
fields can still be accessed directly.

Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index 074abe8e79f89bbd84677db226298b3efce22d01..5f6408c9e0fdaec618fe7de0a8fa991f47a72ca6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -411,18 +411,22 @@ enum stat_item {
 };
 
 #ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
+struct freelist_tid {
        union {
                struct {
-                       void **freelist;        /* Pointer to next available object */
+                       void *freelist;         /* Pointer to next available object */
                        unsigned long tid;      /* Globally unique transaction id */
                };
-               freelist_aba_t freelist_tid;
+               freelist_full_t freelist_tid;
        };
+};
+
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
+struct kmem_cache_cpu {
+       struct freelist_tid;
        struct slab *slab;      /* The slab from which we are allocating */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
        struct slab *partial;   /* Partially allocated slabs */
@@ -4367,11 +4371,11 @@ __update_cpu_freelist_fast(struct kmem_cache *s,
                           void *freelist_old, void *freelist_new,
                           unsigned long tid)
 {
-       freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
-       freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
+       struct freelist_tid old = { .freelist = freelist_old, .tid = tid };
+       struct freelist_tid new = { .freelist = freelist_new, .tid = next_tid(tid) };
 
-       return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
-                                            &old.full, new.full);
+       return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid,
+                                            &old.freelist_tid, new.freelist_tid);
 }
 
 /*