mm/slub: remove DEACTIVATE_TO_* stat items
author    Vlastimil Babka <vbabka@suse.cz>
          Fri, 23 Jan 2026 06:52:59 +0000 (07:52 +0100)
committer Vlastimil Babka <vbabka@suse.cz>
          Thu, 29 Jan 2026 08:29:41 +0000 (09:29 +0100)
The cpu slabs and their deactivations were removed, so remove the now-unused
stat items. Weirdly enough, the values were also used to control whether
__add_partial() adds to the head or the tail of the list, so replace that
with a new enum add_mode, which is cleaner.
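
As an illustration of the pattern (a minimal, self-contained sketch, not the
kernel code; apart from enum add_mode / ADD_TO_HEAD / ADD_TO_TAIL, all names
below are hypothetical), an explicit enum parameter makes the head-vs-tail
choice self-documenting instead of overloading unrelated stat values:

/* sketch: head/tail insertion selected by a dedicated enum */
#include <stdio.h>

enum add_mode {
	ADD_TO_HEAD,
	ADD_TO_TAIL,
};

struct node {
	int val;
	struct node *next;
};

static void add_partial_sketch(struct node **list, struct node *n,
			       enum add_mode mode)
{
	if (mode == ADD_TO_TAIL) {
		struct node **p = list;

		/* walk to the end and append */
		while (*p)
			p = &(*p)->next;
		n->next = NULL;
		*p = n;
	} else {
		/* push onto the front of the list */
		n->next = *list;
		*list = n;
	}
}

int main(void)
{
	struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
	struct node *list = NULL;

	add_partial_sketch(&list, &a, ADD_TO_HEAD);
	add_partial_sketch(&list, &b, ADD_TO_TAIL);
	add_partial_sketch(&list, &c, ADD_TO_HEAD);

	for (struct node *p = list; p; p = p->next)
		printf("%d ", p->val);
	printf("\n");	/* prints: 3 1 2 */
	return 0;
}

At a call site, add_partial_sketch(&list, &n, ADD_TO_TAIL) reads directly as
"append", whereas the old style required knowing that a DEACTIVATE_TO_* stat
value doubled as the placement flag.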

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index bbc9d56484e5e1e6c92a2b0a9e84a3b7cae7eb60..2ec4bcfb37596009805c95e75af57919cf2bb73e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -329,6 +329,11 @@ static void debugfs_slab_add(struct kmem_cache *);
 static inline void debugfs_slab_add(struct kmem_cache *s) { }
 #endif
 
+enum add_mode {
+       ADD_TO_HEAD,
+       ADD_TO_TAIL,
+};
+
 enum stat_item {
        ALLOC_PCS,              /* Allocation from percpu sheaf */
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
@@ -348,8 +353,6 @@ enum stat_item {
        CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
        DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
        DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
-       DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
-       DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        DEACTIVATE_BYPASS,      /* Implicit deactivation */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
@@ -3270,10 +3273,10 @@ static inline void slab_clear_node_partial(struct slab *slab)
  * Management of partially allocated slabs.
  */
 static inline void
-__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
+__add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
 {
        n->nr_partial++;
-       if (tail == DEACTIVATE_TO_TAIL)
+       if (mode == ADD_TO_TAIL)
                list_add_tail(&slab->slab_list, &n->partial);
        else
                list_add(&slab->slab_list, &n->partial);
@@ -3281,10 +3284,10 @@ __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
-                               struct slab *slab, int tail)
+                               struct slab *slab, enum add_mode mode)
 {
        lockdep_assert_held(&n->list_lock);
-       __add_partial(n, slab, tail);
+       __add_partial(n, slab, mode);
 }
 
 static inline void remove_partial(struct kmem_cache_node *n,
@@ -3377,7 +3380,7 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
        if (slab->inuse == slab->objects)
                add_full(s, n, slab);
        else
-               add_partial(n, slab, DEACTIVATE_TO_HEAD);
+               add_partial(n, slab, ADD_TO_HEAD);
 
        inc_slabs_node(s, nid, slab->objects);
        spin_unlock_irqrestore(&n->list_lock, flags);
@@ -3999,7 +4002,7 @@ static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
                        n = get_node(s, slab_nid(slab));
                        spin_lock_irqsave(&n->list_lock, flags);
                }
-               add_partial(n, slab, DEACTIVATE_TO_HEAD);
+               add_partial(n, slab, ADD_TO_HEAD);
                spin_unlock_irqrestore(&n->list_lock, flags);
        }
 
@@ -5070,7 +5073,7 @@ static noinline void free_to_partial_list(
                        /* was on full list */
                        remove_full(s, n, slab);
                        if (!slab_free) {
-                               add_partial(n, slab, DEACTIVATE_TO_TAIL);
+                               add_partial(n, slab, ADD_TO_TAIL);
                                stat(s, FREE_ADD_PARTIAL);
                        }
                } else if (slab_free) {
@@ -5190,7 +5193,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
         * then add it.
         */
        if (unlikely(was_full)) {
-               add_partial(n, slab, DEACTIVATE_TO_TAIL);
+               add_partial(n, slab, ADD_TO_TAIL);
                stat(s, FREE_ADD_PARTIAL);
        }
        spin_unlock_irqrestore(&n->list_lock, flags);
@@ -6605,7 +6608,7 @@ __refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int mi
                                continue;
 
                        list_del(&slab->slab_list);
-                       add_partial(n, slab, DEACTIVATE_TO_HEAD);
+                       add_partial(n, slab, ADD_TO_HEAD);
                }
 
                spin_unlock_irqrestore(&n->list_lock, flags);
@@ -7072,7 +7075,7 @@ static void early_kmem_cache_node_alloc(int node)
         * No locks need to be taken here as it has just been
         * initialized and there is no concurrent access.
         */
-       __add_partial(n, slab, DEACTIVATE_TO_HEAD);
+       __add_partial(n, slab, ADD_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -8764,8 +8767,6 @@ STAT_ATTR(FREE_SLAB, free_slab);
 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
-STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
-STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
@@ -8868,8 +8869,6 @@ static struct attribute *slab_attrs[] = {
        &cpuslab_flush_attr.attr,
        &deactivate_full_attr.attr,
        &deactivate_empty_attr.attr,
-       &deactivate_to_head_attr.attr,
-       &deactivate_to_tail_attr.attr,
        &deactivate_remote_frees_attr.attr,
        &deactivate_bypass_attr.attr,
        &order_fallback_attr.attr,