git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm/kfence: randomize the freelist on initialization
author: Pimyn Girgis <pimyn@google.com>
Tue, 20 Jan 2026 16:15:10 +0000 (17:15 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 27 Jan 2026 03:03:48 +0000 (19:03 -0800)
Randomize the KFENCE freelist during pool initialization to make
allocation patterns less predictable.  This is achieved by shuffling the
order in which metadata objects are added to the freelist using
get_random_u32_below().

Additionally, ensure the error path correctly calculates the address range
to be reset if initialization fails, as the address increment logic has
been moved to a separate loop.

Link: https://lkml.kernel.org/r/20260120161510.3289089-1-pimyn@google.com
Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
Signed-off-by: Pimyn Girgis <pimyn@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Ernesto Martínez García <ernesto.martinezgarcia@tugraz.at>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Kees Cook <kees@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/kfence/core.c

index da0f5b6f57446f29fc8a0dae7b017b0923ecc947..4f79ec72075254666bf0e0d34798b1f4bef3e2b7 100644 (file)
@@ -596,7 +596,7 @@ static void rcu_guarded_free(struct rcu_head *h)
 static unsigned long kfence_init_pool(void)
 {
        unsigned long addr, start_pfn;
-       int i;
+       int i, rand;
 
        if (!arch_kfence_init_pool())
                return (unsigned long)__kfence_pool;
@@ -647,13 +647,27 @@ static unsigned long kfence_init_pool(void)
                INIT_LIST_HEAD(&meta->list);
                raw_spin_lock_init(&meta->lock);
                meta->state = KFENCE_OBJECT_UNUSED;
-               meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
-               list_add_tail(&meta->list, &kfence_freelist);
+               /* Use addr to randomize the freelist. */
+               meta->addr = i;
 
                /* Protect the right redzone. */
-               if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+               if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
                        goto reset_slab;
+       }
+
+       for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
+               rand = get_random_u32_below(i);
+               swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr);
+       }
 
+       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+               struct kfence_metadata *meta_1 = &kfence_metadata_init[i];
+               struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr];
+
+               list_add_tail(&meta_2->list, &kfence_freelist);
+       }
+       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+               kfence_metadata_init[i].addr = addr;
                addr += 2 * PAGE_SIZE;
        }
 
@@ -666,6 +680,7 @@ static unsigned long kfence_init_pool(void)
        return 0;
 
 reset_slab:
+       addr += 2 * i * PAGE_SIZE;
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                struct page *page;