slab: pull kmem_cache_open() into do_kmem_cache_create()
Author:     Christian Brauner <brauner@kernel.org>
AuthorDate: Thu, 5 Sep 2024 07:56:50 +0000 (09:56 +0200)
Commit:     Vlastimil Babka <vbabka@suse.cz>
CommitDate: Tue, 10 Sep 2024 09:42:58 +0000 (11:42 +0200)
do_kmem_cache_create() is the only caller of kmem_cache_open(), so pull the
helper into it; a follow-up patch will pass struct kmem_cache_args down
through this path.

Reviewed-by: Kees Cook <kees@kernel.org>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
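
In miniature, the fold works like this: a helper with exactly one caller is
inlined into that caller, and the helper's error: label plus the caller's
duplicated cleanup collapse into a single out: label where the release
function runs only while err is still set. A standalone userspace sketch of
that shape (illustrative names and types, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    struct cache { int *buf; };

    /* Stand-in for __kmem_cache_release(): safe on a partially
     * initialized object, so the error path can call it unconditionally. */
    static void cache_release(struct cache *c)
    {
        free(c->buf);
        c->buf = NULL;
    }

    /* Post-fold shape: err defaults to failure, every setup step jumps
     * to the single label, and cleanup runs only if err is set. */
    static int cache_create(struct cache *c, size_t n)
    {
        int err = -1;

        c->buf = calloc(n, sizeof(*c->buf));
        if (!c->buf)
            goto out;

        /* ... further setup steps, each doing "goto out" on failure ... */

        err = 0;
    out:
        if (err)
            cache_release(c);
        return err;
    }

    int main(void)
    {
        struct cache c = { NULL };

        if (!cache_create(&c, 16))
            puts("created");
        cache_release(&c);
        return 0;
    }

Note that the diff below also inverts the alloc_kmem_cache_cpus() check to
fit this shape: the old code returned 0 directly on success, while the new
code jumps to out on failure and falls through to the sysfs step on success.
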
diff --git a/mm/slub.c b/mm/slub.c
index 23d9d783ff26e0878a49f711309460ddd3383e8d..30f4ca6335c7fb813214085783033155eb51ac52 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5290,65 +5290,6 @@ static int calculate_sizes(struct kmem_cache *s)
        return !!oo_objects(s->oo);
 }
 
-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
-       s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
-       s->random = get_random_long();
-#endif
-
-       if (!calculate_sizes(s))
-               goto error;
-       if (disable_higher_order_debug) {
-               /*
-                * Disable debugging flags that store metadata if the min slab
-                * order increased.
-                */
-               if (get_order(s->size) > get_order(s->object_size)) {
-                       s->flags &= ~DEBUG_METADATA_FLAGS;
-                       s->offset = 0;
-                       if (!calculate_sizes(s))
-                               goto error;
-               }
-       }
-
-#ifdef system_has_freelist_aba
-       if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
-               /* Enable fast mode */
-               s->flags |= __CMPXCHG_DOUBLE;
-       }
-#endif
-
-       /*
-        * The larger the object size is, the more slabs we want on the partial
-        * list to avoid pounding the page allocator excessively.
-        */
-       s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
-       s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
-       set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
-       s->remote_node_defrag_ratio = 1000;
-#endif
-
-       /* Initialize the pre-computed randomized freelist if slab is up */
-       if (slab_state >= UP) {
-               if (init_cache_random_seq(s))
-                       goto error;
-       }
-
-       if (!init_kmem_cache_nodes(s))
-               goto error;
-
-       if (alloc_kmem_cache_cpus(s))
-               return 0;
-
-error:
-       __kmem_cache_release(s);
-       return -EINVAL;
-}
-
 static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
                              const char *text)
 {
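
One piece that moves verbatim is the min_partial heuristic: ilog2(s->size) / 2
clamped into [MIN_PARTIAL, MAX_PARTIAL], which are 5 and 10 in mm/slub.c
around this commit (an assumption worth verifying against your tree). A
standalone sketch of the arithmetic:

    #include <stdio.h>

    /* MIN_PARTIAL/MAX_PARTIAL as defined in mm/slub.c around this
     * commit (assumption; check your tree). */
    #define MIN_PARTIAL 5
    #define MAX_PARTIAL 10

    static unsigned long ilog2_ul(unsigned long v)
    {
        unsigned long r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    static unsigned long min_partial_for(unsigned long size)
    {
        unsigned long mp = ilog2_ul(size) / 2;

        if (mp > MAX_PARTIAL)
            mp = MAX_PARTIAL;
        if (mp < MIN_PARTIAL)
            mp = MIN_PARTIAL;
        return mp;
    }

    int main(void)
    {
        unsigned long sizes[] = { 64, 4096, 1UL << 22 };

        for (int i = 0; i < 3; i++)
            printf("size %8lu -> min_partial %lu\n",
                   sizes[i], min_partial_for(sizes[i]));
        return 0;
    }

So a 64-byte cache is clamped up to 5 partial slabs, a 4 KiB cache lands at
6, and only very large objects hit the cap of 10.
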
@@ -5904,26 +5845,77 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 
 int do_kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
 {
-       int err;
+       int err = -EINVAL;
 
-       err = kmem_cache_open(s, flags);
-       if (err)
-               return err;
+       s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+       s->random = get_random_long();
+#endif
+
+       if (!calculate_sizes(s))
+               goto out;
+       if (disable_higher_order_debug) {
+               /*
+                * Disable debugging flags that store metadata if the min slab
+                * order increased.
+                */
+               if (get_order(s->size) > get_order(s->object_size)) {
+                       s->flags &= ~DEBUG_METADATA_FLAGS;
+                       s->offset = 0;
+                       if (!calculate_sizes(s))
+                               goto out;
+               }
+       }
+
+#ifdef system_has_freelist_aba
+       if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+               /* Enable fast mode */
+               s->flags |= __CMPXCHG_DOUBLE;
+       }
+#endif
+
+       /*
+        * The larger the object size is, the more slabs we want on the partial
+        * list to avoid pounding the page allocator excessively.
+        */
+       s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+       s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+       set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+       s->remote_node_defrag_ratio = 1000;
+#endif
+
+       /* Initialize the pre-computed randomized freelist if slab is up */
+       if (slab_state >= UP) {
+               if (init_cache_random_seq(s))
+                       goto out;
+       }
+
+       if (!init_kmem_cache_nodes(s))
+               goto out;
+
+       if (!alloc_kmem_cache_cpus(s))
+               goto out;
 
        /* Mutex is not taken during early boot */
-       if (slab_state <= UP)
-               return 0;
+       if (slab_state <= UP) {
+               err = 0;
+               goto out;
+       }
 
        err = sysfs_slab_add(s);
-       if (err) {
-               __kmem_cache_release(s);
-               return err;
-       }
+       if (err)
+               goto out;
 
        if (s->flags & SLAB_STORE_USER)
                debugfs_slab_add(s);
 
-       return 0;
+out:
+       if (err)
+               __kmem_cache_release(s);
+       return err;
 }
 
 #ifdef SLAB_SUPPORTS_SYSFS
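
The payoff hinted at in the commit message: with kmem_cache_open() gone, the
follow-up only needs to thread struct kmem_cache_args through one signature
instead of two. A toy userspace illustration of that (the struct, its fields,
and the function names here are assumptions made for the sketch, not the
kernel's API):

    #include <stdio.h>
    #include <stddef.h>

    /* Userspace stand-in; the kernel's struct kmem_cache_args carries
     * cache parameters (fields here are assumed for illustration). */
    struct cache_args {
        size_t align;
        void (*ctor)(void *obj);
    };

    struct cache { size_t size; size_t align; };

    /* One function to extend: had the single-caller helper survived,
     * this argument would be added to two signatures instead of one. */
    static int do_cache_create(struct cache *c, size_t size,
                               const struct cache_args *args)
    {
        c->size = size;
        c->align = (args && args->align) ? args->align : sizeof(void *);
        return 0;
    }

    int main(void)
    {
        struct cache c;
        struct cache_args args = { .align = 64 };

        do_cache_create(&c, 128, &args);
        printf("size=%zu align=%zu\n", c.size, c.align);
        return 0;
    }
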