]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/kfence: randomize the freelist on initialization
authorPimyn Girgis <pimyn@google.com>
Tue, 3 Feb 2026 20:13:42 +0000 (15:13 -0500)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 6 Feb 2026 15:44:25 +0000 (16:44 +0100)
[ Upstream commit 870ff19251bf3910dda7a7245da826924045fedd ]

Randomize the KFENCE freelist during pool initialization to make
allocation patterns less predictable.  This is achieved by shuffling the
order in which metadata objects are added to the freelist using
get_random_u32_below().

Additionally, ensure the error path correctly calculates the address range
to be reset if initialization fails, as the address increment logic has
been moved to a separate loop.

Link: https://lkml.kernel.org/r/20260120161510.3289089-1-pimyn@google.com
Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
Signed-off-by: Pimyn Girgis <pimyn@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Ernesto Martínez García <ernesto.martinezgarcia@tugraz.at>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Kees Cook <kees@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ replaced kfence_metadata_init with kfence_metadata ]
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/kfence/core.c

index 799d8503f35f0efdcfd9aeb99549d5c9581bb167..edf6deb382b679ac0786d6b4a1b1538293ceaa34 100644 (file)
@@ -542,7 +542,7 @@ static unsigned long kfence_init_pool(void)
 {
        unsigned long addr = (unsigned long)__kfence_pool;
        struct page *pages;
-       int i;
+       int i, rand;
 
        if (!arch_kfence_init_pool())
                return addr;
@@ -590,19 +590,34 @@ static unsigned long kfence_init_pool(void)
                INIT_LIST_HEAD(&meta->list);
                raw_spin_lock_init(&meta->lock);
                meta->state = KFENCE_OBJECT_UNUSED;
-               meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
-               list_add_tail(&meta->list, &kfence_freelist);
+               /* Use addr to randomize the freelist. */
+               meta->addr = i;
 
                /* Protect the right redzone. */
-               if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+               if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
                        goto reset_slab;
+       }
+
+       for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
+               rand = get_random_u32_below(i);
+               swap(kfence_metadata[i - 1].addr, kfence_metadata[rand].addr);
+       }
 
+       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+               struct kfence_metadata *meta_1 = &kfence_metadata[i];
+               struct kfence_metadata *meta_2 = &kfence_metadata[meta_1->addr];
+
+               list_add_tail(&meta_2->list, &kfence_freelist);
+       }
+       for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+               kfence_metadata[i].addr = addr;
                addr += 2 * PAGE_SIZE;
        }
 
        return 0;
 
 reset_slab:
+       addr += 2 * i * PAGE_SIZE;
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                struct slab *slab = page_slab(nth_page(pages, i));