mm/slab: move struct kmem_cache_cpu declaration to slub.c
author    Vlastimil Babka <vbabka@suse.cz>
          Tue, 3 Oct 2023 07:54:15 +0000 (09:54 +0200)
committer Vlastimil Babka <vbabka@suse.cz>
          Wed, 6 Dec 2023 10:57:21 +0000 (11:57 +0100)
Nothing outside SLUB itself accesses the struct kmem_cache_cpu fields, so it
does not need to be declared in slub_def.h. This also allows enum stat_item
to be moved.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
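
The change follows a common C layering pattern: a struct whose fields are
touched by only one translation unit can be defined in that .c file, while
other code holds at most an opaque pointer to it, so the public header stays
small and layout changes do not ripple into every includer. Below is a minimal
sketch of that pattern using a hypothetical counter.h/counter.c pair; the
names are illustrative only and are not kernel code.

/* counter.h -- hypothetical public header: exposes only what callers need */
struct counter;                         /* opaque outside counter.c */
struct counter *counter_new(void);
void counter_inc(struct counter *c);
unsigned long counter_read(const struct counter *c);
void counter_free(struct counter *c);

/* counter.c -- sole user of the full definition, analogous to keeping
 * struct kmem_cache_cpu private to mm/slub.c */
#include <stdlib.h>
#include "counter.h"

struct counter {                        /* layout can change without touching the header */
        unsigned long value;
};

struct counter *counter_new(void)
{
        return calloc(1, sizeof(struct counter));
}

void counter_inc(struct counter *c)
{
        c->value++;
}

unsigned long counter_read(const struct counter *c)
{
        return c->value;
}

void counter_free(struct counter *c)
{
        free(c);
}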
include/linux/slub_def.h
mm/slub.c

index deb90cf4bffb05ecbc69e8654431f470f8faf6e0..a0229ea429770be49bab3f41385aef9a30a78326 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
 #include <linux/reciprocal_div.h>
 #include <linux/local_lock.h>
 
-enum stat_item {
-       ALLOC_FASTPATH,         /* Allocation from cpu slab */
-       ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
-       FREE_FASTPATH,          /* Free to cpu slab */
-       FREE_SLOWPATH,          /* Freeing not to cpu slab */
-       FREE_FROZEN,            /* Freeing to frozen slab */
-       FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
-       FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
-       ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
-       ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
-       ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
-       ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
-       FREE_SLAB,              /* Slab freed to the page allocator */
-       CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
-       DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
-       DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
-       DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
-       DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
-       DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
-       DEACTIVATE_BYPASS,      /* Implicit deactivation */
-       ORDER_FALLBACK,         /* Number of times fallback was necessary */
-       CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
-       CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
-       CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
-       CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
-       CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
-       CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
-       NR_SLUB_STAT_ITEMS
-};
-
-#ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
-       union {
-               struct {
-                       void **freelist;        /* Pointer to next available object */
-                       unsigned long tid;      /* Globally unique transaction id */
-               };
-               freelist_aba_t freelist_tid;
-       };
-       struct slab *slab;      /* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-       struct slab *partial;   /* Partially allocated frozen slabs */
-#endif
-       local_lock_t lock;      /* Protects the fields above */
-#ifdef CONFIG_SLUB_STATS
-       unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-#endif /* CONFIG_SLUB_TINY */
-
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 #define slub_percpu_partial(c)         ((c)->partial)
 
index 3e01731783df86d33867ce449124ef21229dfc91..979932d046fdef206cbd018c3f77ef2677cf175d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -330,6 +330,60 @@ static void debugfs_slab_add(struct kmem_cache *);
 static inline void debugfs_slab_add(struct kmem_cache *s) { }
 #endif
 
+enum stat_item {
+       ALLOC_FASTPATH,         /* Allocation from cpu slab */
+       ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
+       FREE_FASTPATH,          /* Free to cpu slab */
+       FREE_SLOWPATH,          /* Freeing not to cpu slab */
+       FREE_FROZEN,            /* Freeing to frozen slab */
+       FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
+       FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
+       ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
+       ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
+       ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
+       ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
+       FREE_SLAB,              /* Slab freed to the page allocator */
+       CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
+       DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
+       DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
+       DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
+       DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
+       DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+       DEACTIVATE_BYPASS,      /* Implicit deactivation */
+       ORDER_FALLBACK,         /* Number of times fallback was necessary */
+       CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
+       CMPXCHG_DOUBLE_FAIL,    /* Failures of slab freelist update */
+       CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
+       CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
+       CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
+       CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
+       NR_SLUB_STAT_ITEMS
+};
+
+#ifndef CONFIG_SLUB_TINY
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
+struct kmem_cache_cpu {
+       union {
+               struct {
+                       void **freelist;        /* Pointer to next available object */
+                       unsigned long tid;      /* Globally unique transaction id */
+               };
+               freelist_aba_t freelist_tid;
+       };
+       struct slab *slab;      /* The slab from which we are allocating */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+       struct slab *partial;   /* Partially allocated frozen slabs */
+#endif
+       local_lock_t lock;      /* Protects the fields above */
+#ifdef CONFIG_SLUB_STATS
+       unsigned int stat[NR_SLUB_STAT_ITEMS];
+#endif
+};
+#endif /* CONFIG_SLUB_TINY */
+
 static inline void stat(const struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS