slab: remove frozen slab checks from __slab_free()
Author:     Vlastimil Babka <vbabka@suse.cz>
AuthorDate: Fri, 23 Jan 2026 06:52:58 +0000 (07:52 +0100)
Commit:     Vlastimil Babka <vbabka@suse.cz>
CommitDate: Thu, 29 Jan 2026 08:29:27 +0000 (09:29 +0100)
Currently, slabs are only frozen after consistency checks have failed.
This can happen only in caches with debugging enabled, and those use
free_to_partial_list() for freeing. The non-debug path of __slab_free()
can thus stop considering the frozen field, and we can remove the
FREE_FROZEN stat.

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
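
For context on why the check is removable, here is a hedged sketch of
the debug dispatch the message refers to; the guard's shape follows
mm/slub.c, but its exact placement may differ in the tree. Every
debug-enabled cache diverts to free_to_partial_list() before the
lockless path runs, and slabs are only frozen when those debug
consistency checks fail, so the lockless path never observes a frozen
slab:

	/*
	 * Sketch (not verbatim mm/slub.c): early in the slow free path,
	 * caches with debugging enabled divert to free_to_partial_list().
	 * Slabs are only ever frozen after a consistency check fails in
	 * that debug path, so the lockless cmpxchg loop never sees
	 * slab->frozen set.
	 */
	if (kmem_cache_debug(s)) {
		free_to_partial_list(s, slab, head, tail, cnt, addr);
		return;
	}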
mm/slub.c

index d782ceb8a2baec56fa9092bfbe83d7803629cf76..bbc9d56484e5e1e6c92a2b0a9e84a3b7cae7eb60 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -338,7 +338,6 @@ enum stat_item {
        FREE_RCU_SHEAF_FAIL,    /* Failed to free to a rcu_free sheaf */
        FREE_FASTPATH,          /* Free to cpu slab */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
-       FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
        ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
@@ -5109,7 +5108,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                        unsigned long addr)
 
 {
-       bool was_frozen, was_full;
+       bool was_full;
        struct freelist_counters old, new;
        struct kmem_cache_node *n = NULL;
        unsigned long flags;
@@ -5132,7 +5131,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                old.counters = slab->counters;
 
                was_full = (old.freelist == NULL);
-               was_frozen = old.frozen;
 
                set_freepointer(s, tail, old.freelist);
 
@@ -5145,7 +5143,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
                 * to (due to not being full anymore) the partial list.
                 * Unless it's frozen.
                 */
-               if ((!new.inuse || was_full) && !was_frozen) {
+               if (!new.inuse || was_full) {
 
                        n = get_node(s, slab_nid(slab));
                        /*
@@ -5164,20 +5162,10 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
        } while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
 
        if (likely(!n)) {
-
-               if (likely(was_frozen)) {
-                       /*
-                        * The list lock was not taken therefore no list
-                        * activity can be necessary.
-                        */
-                       stat(s, FREE_FROZEN);
-               }
-
                /*
-                * In other cases we didn't take the list_lock because the slab
-                * was already on the partial list and will remain there.
+                * We didn't take the list_lock because the slab was already on
+                * the partial list and will remain there.
                 */
-
                return;
        }
 
@@ -8766,7 +8754,6 @@ STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
 STAT_ATTR(FREE_FASTPATH, free_fastpath);
 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
-STAT_ATTR(FREE_FROZEN, free_frozen);
 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
@@ -8871,7 +8858,6 @@ static struct attribute *slab_attrs[] = {
        &free_rcu_sheaf_fail_attr.attr,
        &free_fastpath_attr.attr,
        &free_slowpath_attr.attr,
-       &free_frozen_attr.attr,
        &free_add_partial_attr.attr,
        &free_remove_partial_attr.attr,
        &alloc_from_partial_attr.attr,
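
Pieced together from the hunks above, the non-debug slowpath now reads
roughly as below. This is an illustrative condensation, not the
verbatim function: the unlock-and-retry step, the READ_ONCE() load and
the new.* updates are reconstructed from surrounding context and may
differ slightly from the tree, and the stat updates plus the
empty-slab/partial-list handling past the loop are elided.

	bool was_full;
	struct freelist_counters old, new;
	struct kmem_cache_node *n = NULL;
	unsigned long flags;

	do {
		if (unlikely(n)) {
			/* Retrying: drop the list_lock taken last pass. */
			spin_unlock_irqrestore(&n->list_lock, flags);
			n = NULL;
		}
		old.freelist = READ_ONCE(slab->freelist);
		old.counters = slab->counters;

		was_full = (old.freelist == NULL);
		/* was_frozen is gone: non-debug slabs are never frozen. */

		set_freepointer(s, tail, old.freelist);

		new.counters = old.counters;
		new.inuse -= cnt;
		new.freelist = head;

		/*
		 * The slab may become empty, or go from full to partially
		 * full; either way the node's list_lock is needed.
		 */
		if (!new.inuse || was_full) {
			n = get_node(s, slab_nid(slab));
			spin_lock_irqsave(&n->list_lock, flags);
		}
	} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));

	if (likely(!n)) {
		/*
		 * We didn't take the list_lock because the slab was already
		 * on the partial list and will remain there.
		 */
		return;
	}

With CONFIG_SLUB_STATS enabled, each STAT_ATTR() entry is exposed as a
file under /sys/kernel/slab/<cache>/, which is why the last two hunks
drop the free_frozen attribute together with the enum entry: the
counter, its sysfs attribute, and the slab_attrs[] slot always move as
a set.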