6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Jan 2026 11:12:48 +0000 (12:12 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Jan 2026 11:12:48 +0000 (12:12 +0100)
added patches:
powerpc-64s-radix-kfence-map-__kfence_pool-at-page-granularity.patch

queue-6.1/powerpc-64s-radix-kfence-map-__kfence_pool-at-page-granularity.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/powerpc-64s-radix-kfence-map-__kfence_pool-at-page-granularity.patch b/queue-6.1/powerpc-64s-radix-kfence-map-__kfence_pool-at-page-granularity.patch
new file mode 100644
index 0000000..bebd212
--- /dev/null
+++ b/queue-6.1/powerpc-64s-radix-kfence-map-__kfence_pool-at-page-granularity.patch
@@ -0,0 +1,228 @@
+From 353d7a84c214f184d5a6b62acdec8b4424159b7c Mon Sep 17 00:00:00 2001
+From: Hari Bathini <hbathini@linux.ibm.com>
+Date: Mon, 1 Jul 2024 18:30:21 +0530
+Subject: powerpc/64s/radix/kfence: map __kfence_pool at page granularity
+
+From: Hari Bathini <hbathini@linux.ibm.com>
+
+commit 353d7a84c214f184d5a6b62acdec8b4424159b7c upstream.
+
+When KFENCE is enabled, total system memory is mapped at page level
+granularity. But in radix MMU mode, ~3GB additional memory is needed
+to map 100GB of system memory at page level granularity when compared
+to using 2MB direct mapping. This is not desired considering KFENCE is
+designed to be enabled in production kernels [1].
+
+Mapping only the memory allocated for KFENCE pool at page granularity is
+sufficient to enable KFENCE support. So, allocate __kfence_pool during
+bootup and map it at page granularity instead of mapping all system
+memory at page granularity.
+
+Without patch:
+  # cat /proc/meminfo
+  MemTotal:       101201920 kB
+
+With patch:
+  # cat /proc/meminfo
+  MemTotal:       104483904 kB
+
+Note that enabling KFENCE at runtime is disabled for radix MMU for now,
+as it depends on the ability to split page table mappings and such APIs
+are not currently implemented for radix MMU.
+
+All kfence_test.c testcases passed with this patch.
+
+[1] https://lore.kernel.org/all/20201103175841.3495947-2-elver@google.com/
+
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240701130021.578240-1-hbathini@linux.ibm.com
+Cc: Aboorva Devarajan <aboorvad@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/kfence.h        |   11 +++-
+ arch/powerpc/mm/book3s64/radix_pgtable.c |   84 +++++++++++++++++++++++++++++--
+ arch/powerpc/mm/init-common.c            |    3 +
+ 3 files changed, 93 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/kfence.h
++++ b/arch/powerpc/include/asm/kfence.h
+@@ -15,10 +15,19 @@
+ #define ARCH_FUNC_PREFIX "."
+ #endif
++#ifdef CONFIG_KFENCE
++extern bool kfence_disabled;
++
++static inline void disable_kfence(void)
++{
++      kfence_disabled = true;
++}
++
+ static inline bool arch_kfence_init_pool(void)
+ {
+-      return true;
++      return !kfence_disabled;
+ }
++#endif
+ #ifdef CONFIG_PPC64
+ static inline bool kfence_protect_page(unsigned long addr, bool protect)
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -17,6 +17,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/string_helpers.h>
+ #include <linux/memory.h>
++#include <linux/kfence.h>
+ #include <asm/pgalloc.h>
+ #include <asm/mmu_context.h>
+@@ -31,6 +32,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/ultravisor.h>
+ #include <asm/set_memory.h>
++#include <asm/kfence.h>
+ #include <trace/events/thp.h>
+@@ -294,7 +296,8 @@ static unsigned long next_boundary(unsig
+ static int __meminit create_physical_mapping(unsigned long start,
+                                            unsigned long end,
+-                                           int nid, pgprot_t _prot)
++                                           int nid, pgprot_t _prot,
++                                           unsigned long mapping_sz_limit)
+ {
+       unsigned long vaddr, addr, mapping_size = 0;
+       bool prev_exec, exec = false;
+@@ -302,7 +305,10 @@ static int __meminit create_physical_map
+       int psize;
+       unsigned long max_mapping_size = radix_mem_block_size;
+-      if (debug_pagealloc_enabled_or_kfence())
++      if (mapping_sz_limit < max_mapping_size)
++              max_mapping_size = mapping_sz_limit;
++
++      if (debug_pagealloc_enabled())
+               max_mapping_size = PAGE_SIZE;
+       start = ALIGN(start, PAGE_SIZE);
+@@ -357,8 +363,74 @@ static int __meminit create_physical_map
+       return 0;
+ }
++#ifdef CONFIG_KFENCE
++static bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
++
++static int __init parse_kfence_early_init(char *arg)
++{
++      int val;
++
++      if (get_option(&arg, &val))
++              kfence_early_init = !!val;
++      return 0;
++}
++early_param("kfence.sample_interval", parse_kfence_early_init);
++
++static inline phys_addr_t alloc_kfence_pool(void)
++{
++      phys_addr_t kfence_pool;
++
++      /*
++       * TODO: Support to enable KFENCE after bootup depends on the ability to
++       *       split page table mappings. As such support is not currently
++       *       implemented for radix pagetables, support enabling KFENCE
++       *       only at system startup for now.
++       *
++       *       After support for splitting mappings is available on radix,
++       *       alloc_kfence_pool() & map_kfence_pool() can be dropped and
++       *       mapping for __kfence_pool memory can be
++       *       split during arch_kfence_init_pool().
++       */
++      if (!kfence_early_init)
++              goto no_kfence;
++
++      kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
++      if (!kfence_pool)
++              goto no_kfence;
++
++      memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
++      return kfence_pool;
++
++no_kfence:
++      disable_kfence();
++      return 0;
++}
++
++static inline void map_kfence_pool(phys_addr_t kfence_pool)
++{
++      if (!kfence_pool)
++              return;
++
++      if (create_physical_mapping(kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
++                                  -1, PAGE_KERNEL, PAGE_SIZE))
++              goto err;
++
++      memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
++      __kfence_pool = __va(kfence_pool);
++      return;
++
++err:
++      memblock_phys_free(kfence_pool, KFENCE_POOL_SIZE);
++      disable_kfence();
++}
++#else
++static inline phys_addr_t alloc_kfence_pool(void) { return 0; }
++static inline void map_kfence_pool(phys_addr_t kfence_pool) { }
++#endif
++
+ static void __init radix_init_pgtable(void)
+ {
++      phys_addr_t kfence_pool;
+       unsigned long rts_field;
+       phys_addr_t start, end;
+       u64 i;
+@@ -366,6 +438,8 @@ static void __init radix_init_pgtable(vo
+       /* We don't support slb for radix */
+       slb_set_size(0);
++      kfence_pool = alloc_kfence_pool();
++
+       /*
+        * Create the linear mapping
+        */
+@@ -382,9 +456,11 @@ static void __init radix_init_pgtable(vo
+               }
+               WARN_ON(create_physical_mapping(start, end,
+-                                              -1, PAGE_KERNEL));
++                                              -1, PAGE_KERNEL, ~0UL));
+       }
++      map_kfence_pool(kfence_pool);
++
+       if (!cpu_has_feature(CPU_FTR_HVMODE) &&
+                       cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+               /*
+@@ -889,7 +965,7 @@ int __meminit radix__create_section_mapp
+       }
+       return create_physical_mapping(__pa(start), __pa(end),
+-                                     nid, prot);
++                                     nid, prot, ~0UL);
+ }
+ int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
+--- a/arch/powerpc/mm/init-common.c
++++ b/arch/powerpc/mm/init-common.c
+@@ -31,6 +31,9 @@ EXPORT_SYMBOL_GPL(kernstart_virt_addr);
+ bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
+ bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
++#ifdef CONFIG_KFENCE
++bool __ro_after_init kfence_disabled;
++#endif
+ static int __init parse_nosmep(char *p)
+ {
diff --git a/queue-6.1/series b/queue-6.1/series
index 84e9ee0a2fae008f7acad9ec0fb868004665ad91..c3c1c1360fcdca54e4dd7785a2b99fb5f5879001 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -511,3 +511,4 @@ drm-i915-gem-zero-initialize-the-eb.vma-array-in-i915_gem_do_execbuffer.patch
 drm-nouveau-dispnv50-don-t-call-drm_atomic_get_crtc_state-in-prepare_fb.patch
 blk-mq-add-helper-for-checking-if-one-cpu-is-mapped-to-specified-hctx.patch
 tpm-cap-the-number-of-pcr-banks.patch
+powerpc-64s-radix-kfence-map-__kfence_pool-at-page-granularity.patch
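
For anyone queueing or testing this backport, a brief usage sketch of the
boot-time gate the patch introduces: it reuses the standard KFENCE
kfence.sample_interval parameter, which the new early_param handler now
also parses on powerpc. The interval value below is illustrative, not
taken from this commit:

  # allocate and map __kfence_pool at page granularity during boot
  # (the default whenever CONFIG_KFENCE_SAMPLE_INTERVAL is non-zero)
  kfence.sample_interval=100

  # skip the boot-time pool allocation; KFENCE then stays disabled on
  # radix, since enabling it later would require splitting the linear
  # mapping, which radix does not yet support
  kfence.sample_interval=0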