mm/slub, kunit: Make slub_kunit unaffected by user specified flags
author Hyeonggon Yoo <42.hyeyoo@gmail.com>
Wed, 6 Apr 2022 06:00:03 +0000 (15:00 +0900)
committer Vlastimil Babka <vbabka@suse.cz>
Wed, 6 Apr 2022 08:11:48 +0000 (10:11 +0200)
slub_kunit does not expect other debugging flags to be set when running
tests. When the SLAB_RED_ZONE flag is set globally, the tests fail because
the flag affects the number of errors reported.

To make slub_kunit unaffected by user-specified debugging flags,
introduce SLAB_NO_USER_FLAGS to ignore them. With this flag, only the
flags specified in the code are used and any others are ignored (a short
usage sketch follows the tags below).

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/r/Yk0sY9yoJhFEXWOg@hyeyoo
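
For orientation, a minimal usage sketch of the new flag, mirroring the
lib/slub_kunit.c hunks below; the cache name and object size here are
illustrative only, not part of the patch.

	#include <linux/slab.h>

	/*
	 * Illustrative only: a cache created for a self-test ORs in
	 * SLAB_NO_USER_FLAGS so it gets exactly the debug flags written
	 * in the code (here SLAB_RED_ZONE), regardless of any slub_debug=
	 * options passed on the kernel command line.
	 */
	static struct kmem_cache *create_selftest_cache(void)
	{
		return kmem_cache_create("TestSlub_example", /* illustrative name */
					 64, 0,
					 SLAB_RED_ZONE | SLAB_NO_USER_FLAGS,
					 NULL);
	}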
include/linux/slab.h
lib/slub_kunit.c
mm/slab.h
mm/slub.c

index 373b3ef99f4e5204b347ef280407885d4650bb7f..11ceddcae9f4d67ed3617b5708cbe52ab352aea1 100644 (file)
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
 #define SLAB_KASAN             0
 #endif
 
+/*
+ * Ignore user specified debugging flags.
+ * Intended for caches created for self-tests so they have only flags
+ * specified in the code and other flags are ignored.
+ */
+#define SLAB_NO_USER_FLAGS     ((slab_flags_t __force)0x10000000U)
+
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
 #define SLAB_RECLAIM_ACCOUNT   ((slab_flags_t __force)0x00020000U)
index 8662dc6cb5092206a0c73ff293772ccd1c230a83..7a0564d7cb7aeb3e0fbf886a2a12a3720214cff3 100644 (file)
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -12,7 +12,7 @@ static int slab_errors;
 static void test_clobber_zone(struct kunit *test)
 {
        struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
-                               SLAB_RED_ZONE, NULL);
+                               SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
        u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
        kasan_disable_current();
@@ -30,7 +30,7 @@ static void test_clobber_zone(struct kunit *test)
 static void test_next_pointer(struct kunit *test)
 {
        struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
-                               SLAB_POISON, NULL);
+                               SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
        u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
        unsigned long tmp;
        unsigned long *ptr_addr;
@@ -75,7 +75,7 @@ static void test_next_pointer(struct kunit *test)
 static void test_first_word(struct kunit *test)
 {
        struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
-                               SLAB_POISON, NULL);
+                               SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
        u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
        kmem_cache_free(s, p);
@@ -90,7 +90,7 @@ static void test_first_word(struct kunit *test)
 static void test_clobber_50th_byte(struct kunit *test)
 {
        struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
-                               SLAB_POISON, NULL);
+                               SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
        u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
        kmem_cache_free(s, p);
@@ -106,7 +106,7 @@ static void test_clobber_50th_byte(struct kunit *test)
 static void test_clobber_redzone_free(struct kunit *test)
 {
        struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
-                               SLAB_RED_ZONE, NULL);
+                               SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
        u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
        kasan_disable_current();
index fd7ae2024897d5ccb0bdabb5f079544e3b5183f1..f7d018100994e4c0aeaa9051056792d0c90b176e 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -331,7 +331,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
                          SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-                         SLAB_TEMPORARY | SLAB_ACCOUNT)
+                         SLAB_TEMPORARY | SLAB_ACCOUNT | SLAB_NO_USER_FLAGS)
 #else
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
@@ -350,7 +350,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
-                             SLAB_ACCOUNT)
+                             SLAB_ACCOUNT | \
+                             SLAB_NO_USER_FLAGS)
 
 bool __kmem_cache_empty(struct kmem_cache *);
 int __kmem_cache_shutdown(struct kmem_cache *);
index 74d92aa4a3a28d744d3d5dfd4b30f5d632172207..4c78f5919356c288092cc03919b2bd7ced267460 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1584,6 +1584,9 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t block_flags;
        slab_flags_t slub_debug_local = slub_debug;
 
+       if (flags & SLAB_NO_USER_FLAGS)
+               return flags;
+
        /*
         * If the slab cache is for debugging (e.g. kmemleak) then
         * don't store user (stack trace) information by default,
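
In effect, the early return above makes kmem_cache_flags() leave self-test
caches' flags untouched. A simplified behavioural sketch, not the real
mm/slub.c code: slub_debug_global stands in for the flags parsed from the
slub_debug= boot option, and the per-cache slub_debug=...,<name> handling is
omitted.

	static slab_flags_t slub_debug_global;	/* stand-in for parsed slub_debug= flags */

	static slab_flags_t example_kmem_cache_flags(slab_flags_t flags)
	{
		/* Self-test caches keep exactly the flags written in the code. */
		if (flags & SLAB_NO_USER_FLAGS)
			return flags;

		/*
		 * All other caches still get the user-specified debugging
		 * flags merged in, as before this patch.
		 */
		return flags | slub_debug_global;
	}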