From: Greg Kroah-Hartman
Date: Thu, 8 Jan 2026 16:09:54 +0000 (+0100)
Subject: 5.15-stable patches
X-Git-Tag: v6.1.160~29
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5a7d4a0f506944a01fcb485e213a9c85e477d7b0;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	mm-fix-copy_from_user_nofault.patch
	x86-remove-__range_not_ok.patch
---

diff --git a/queue-5.15/mm-fix-copy_from_user_nofault.patch b/queue-5.15/mm-fix-copy_from_user_nofault.patch
new file mode 100644
index 0000000000..36f653831b
--- /dev/null
+++ b/queue-5.15/mm-fix-copy_from_user_nofault.patch
@@ -0,0 +1,77 @@
+From stable+bounces-206300-greg=kroah.com@vger.kernel.org Thu Jan 8 14:49:31 2026
+From: Thadeu Lima de Souza Cascardo
+Date: Thu, 8 Jan 2026 07:15:45 -0300
+Subject: mm: Fix copy_from_user_nofault().
+To: stable@vger.kernel.org
+Cc: Kees Cook, Alexei Starovoitov, Hsin-Wei Hung, Florian Lehner, Thadeu Lima de Souza Cascardo
+Message-ID: <20260108101545.2982626-2-cascardo@igalia.com>
+
+From: Alexei Starovoitov
+
+commit d319f344561de23e810515d109c7278919bff7b0 upstream.
+
+There are several issues with copy_from_user_nofault():
+
+- access_ok() is designed for user context only and for that reason
+it has WARN_ON_IN_IRQ() which triggers when bpf, kprobe, eprobe
+and perf on ppc are calling it from irq.
+
+- it's missing nmi_uaccess_okay() which is a nop on all architectures
+except x86 where it's required.
+The comment in arch/x86/mm/tlb.c explains the details why it's necessary.
+Calling copy_from_user_nofault() from bpf, [ke]probe without this check is not safe.
+
+- __copy_from_user_inatomic() under CONFIG_HARDENED_USERCOPY is calling
+check_object_size()->__check_object_size()->check_heap_object()->find_vmap_area()->spin_lock()
+which is not safe to do from bpf, [ke]probe and perf due to potential deadlock.
+
+Fix all three issues. At the end the copy_from_user_nofault() becomes
+equivalent to copy_from_user_nmi() from safety point of view with
+a difference in the return value.
+
+Reported-by: Hsin-Wei Hung
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Florian Lehner
+Tested-by: Hsin-Wei Hung
+Tested-by: Florian Lehner
+Link: https://lore.kernel.org/r/20230410174345.4376-2-dev@der-flo.net
+Signed-off-by: Alexei Starovoitov
+[cascardo: the test in check_heap_objects did not exist]
+Signed-off-by: Thadeu Lima de Souza Cascardo
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/maccess.c |   16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -5,6 +5,7 @@
+ #include
+ #include
+ #include
++#include
+
+ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
+ 					     size_t size)
+@@ -223,11 +224,16 @@ long copy_from_user_nofault(void *dst, c
+ 	long ret = -EFAULT;
+ 	mm_segment_t old_fs = force_uaccess_begin();
+
+-	if (access_ok(src, size)) {
+-		pagefault_disable();
+-		ret = __copy_from_user_inatomic(dst, src, size);
+-		pagefault_enable();
+-	}
++	if (!__access_ok(src, size))
++		return ret;
++
++	if (!nmi_uaccess_okay())
++		return ret;
++
++	pagefault_disable();
++	ret = __copy_from_user_inatomic(dst, src, size);
++	pagefault_enable();
++
+ 	force_uaccess_end(old_fs);
+
+ 	if (ret)
diff --git a/queue-5.15/series b/queue-5.15/series
index 868df866dc..febdda4530 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -492,3 +492,5 @@ kvm-arm64-sys_regs-disable-wuninitialized-const-pointer-warning.patch
 ipv6-fix-potential-uninit-value-access-in-__ip6_make_skb.patch
 ipv4-fix-uninit-value-access-in-__ip_make_skb.patch
 selftests-net-test_vxlan_under_vrf-fix-hv-connectivity-test.patch
+x86-remove-__range_not_ok.patch
+mm-fix-copy_from_user_nofault.patch
diff --git a/queue-5.15/x86-remove-__range_not_ok.patch b/queue-5.15/x86-remove-__range_not_ok.patch
new file mode 100644
index 0000000000..f4e1fc6b3e
--- /dev/null
+++ b/queue-5.15/x86-remove-__range_not_ok.patch
@@ -0,0 +1,128 @@
+From stable+bounces-206299-greg=kroah.com@vger.kernel.org Thu Jan 8 12:20:35 2026
+From: Thadeu Lima de Souza Cascardo
+Date: Thu, 8 Jan 2026 07:15:44 -0300
+Subject: x86: remove __range_not_ok()
+To: stable@vger.kernel.org
+Cc: Kees Cook, Arnd Bergmann, Al Viro, Christoph Hellwig, Thadeu Lima de Souza Cascardo
+Message-ID: <20260108101545.2982626-1-cascardo@igalia.com>
+
+From: Arnd Bergmann
+
+commit 36903abedfe8d419e90ce349b2b4ce6dc2883e17 upstream.
+
+The __range_not_ok() helper is an x86 (and sparc64) specific interface
+that does roughly the same thing as __access_ok(), but with different
+calling conventions.
+
+Change this to use the normal interface in order for consistency as we
+clean up all access_ok() implementations.
+
+This changes the limit from TASK_SIZE to TASK_SIZE_MAX, which Al points
+out is the right thing to do here anyway.
+
+The callers have to use __access_ok() instead of the normal access_ok()
+though, because on x86 that contains a WARN_ON_IN_IRQ() check that cannot
+be used inside of NMI context while tracing.
+
+The check in copy_code() is not needed any more, because this one is
+already done by copy_from_user_nmi().
+
+Suggested-by: Al Viro
+Suggested-by: Christoph Hellwig
+Link: https://lore.kernel.org/lkml/YgsUKcXGR7r4nINj@zeniv-ca.linux.org.uk/
+Signed-off-by: Arnd Bergmann
+Stable-dep-of: d319f344561d ("mm: Fix copy_from_user_nofault().")
+Signed-off-by: Thadeu Lima de Souza Cascardo
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/events/core.c         |    2 +-
+ arch/x86/include/asm/uaccess.h |   10 ++++++----
+ arch/x86/kernel/dumpstack.c    |    6 ------
+ arch/x86/kernel/stacktrace.c   |    2 +-
+ arch/x86/lib/usercopy.c        |    2 +-
+ 5 files changed, 9 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2790,7 +2790,7 @@ perf_callchain_kernel(struct perf_callch
+ static inline int
+ valid_user_frame(const void __user *fp, unsigned long size)
+ {
+-	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
++	return __access_ok(fp, size);
+ }
+
+ static unsigned long get_segment_base(unsigned int segment)
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -16,8 +16,10 @@
+  * Test whether a block of memory is a valid user space address.
+  * Returns 0 if the range is valid, nonzero otherwise.
+  */
+-static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
++static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size)
+ {
++	unsigned long limit = TASK_SIZE_MAX;
++
+ 	/*
+ 	 * If we have used "sizeof()" for the size,
+ 	 * we know it won't overflow the limit (but
+@@ -35,10 +37,10 @@ static inline bool __chk_range_not_ok(un
+ 	return unlikely(addr > limit);
+ }
+
+-#define __range_not_ok(addr, size, limit) \
++#define __access_ok(addr, size) \
+ ({ \
+ 	__chk_user_ptr(addr); \
+-	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
++	!__chk_range_not_ok((unsigned long __force)(addr), size); \
+ })
+
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+@@ -69,7 +71,7 @@ static inline bool pagefault_disabled(vo
+ #define access_ok(addr, size) \
+ ({ \
+ 	WARN_ON_IN_IRQ(); \
+-	likely(!__range_not_ok(addr, size, TASK_SIZE_MAX)); \
++	likely(__access_ok(addr, size)); \
+ })
+
+ extern int __get_user_1(void);
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -81,12 +81,6 @@ static int copy_code(struct pt_regs *reg
+ 	/* The user space code from other tasks cannot be accessed. */
+ 	if (regs != task_pt_regs(current))
+ 		return -EPERM;
+-	/*
+-	 * Make sure userspace isn't trying to trick us into dumping kernel
+-	 * memory by pointing the userspace instruction pointer at it.
+-	 */
+-	if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
+-		return -EINVAL;
+
+ 	/*
+ 	 * Even if named copy_from_user_nmi() this can be invoked from
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -90,7 +90,7 @@ copy_stack_frame(const struct stack_fram
+ {
+ 	int ret;
+
+-	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
++	if (!__access_ok(fp, sizeof(*frame)))
+ 		return 0;
+
+ 	ret = 1;
+--- a/arch/x86/lib/usercopy.c
++++ b/arch/x86/lib/usercopy.c
+@@ -32,7 +32,7 @@ copy_from_user_nmi(void *to, const void
+ {
+ 	unsigned long ret;
+
+-	if (__range_not_ok(from, n, TASK_SIZE))
++	if (!__access_ok(from, n))
+ 		return n;
+
+ 	if (!nmi_uaccess_okay())
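
Illustration (not part of the queued patches, and not kernel code): the mm patch above turns copy_from_user_nofault() into a sequence of three gates: an overflow-safe range check that carries no user-context WARN, an nmi_uaccess_okay()-style check, and the copy itself with page faults disabled. The userspace C model below sketches that control flow under stated assumptions: the 47-bit limit, the uaccess_okay flag and all helper names are stand-ins, plain memcpy() stands in for the pagefault-disabled __copy_from_user_inatomic(), and a 64-bit build is assumed.

/* Minimal userspace model of the fixed copy_from_user_nofault() flow. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Stand-in for TASK_SIZE_MAX (x86-64 style 47-bit user limit). */
static const uintptr_t task_size_max = ((uintptr_t)1 << 47) - 4096;
/* Stand-in for nmi_uaccess_okay(): modeled as a simple flag. */
static bool uaccess_okay = true;

/* Overflow-safe range check, same shape as __chk_range_not_ok(). */
static bool range_ok(const void *src, size_t size)
{
	uintptr_t addr = (uintptr_t)src;

	if (size > task_size_max)
		return false;
	return addr <= task_size_max - size;
}

static long copy_nofault_model(void *dst, const void *src, size_t size)
{
	if (!range_ok(src, size))   /* gate 1: range check without a WARN */
		return -EFAULT;
	if (!uaccess_okay)          /* gate 2: nmi_uaccess_okay() stand-in */
		return -EFAULT;
	memcpy(dst, src, size);     /* gate 3: the pagefault-disabled copy */
	return 0;
}

int main(void)
{
	char src[8] = "abcdefg", dst[8] = { 0 };

	printf("in-range copy: %ld\n", copy_nofault_model(dst, src, sizeof(src)));
	printf("out-of-range:  %ld\n",
	       copy_nofault_model(dst, (const void *)(task_size_max + 1), 1));
	return 0;
}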
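
Illustration (likewise not part of the queued patches): the x86 patch flips the calling convention. The old __range_not_ok(addr, size, limit) returned "true if the range is NOT ok" and took an explicit limit; the new __access_ok(addr, size) returns "true if the range IS ok" with TASK_SIZE_MAX fixed inside the helper, so call sites such as "__range_not_ok(fp, size, TASK_SIZE) == 0" become "__access_ok(fp, size)". The small runnable model below checks that the two spellings always agree; the limit value and helper names are stand-ins, and a 64-bit build is assumed.

/* Model of the __range_not_ok() to __access_ok() calling-convention change. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for TASK_SIZE_MAX. */
#define LIMIT_MODEL ((((uintptr_t)1) << 47) - 4096)

/* Old style: nonzero means the range is NOT ok, limit passed explicitly. */
static bool range_not_ok(uintptr_t addr, size_t size, uintptr_t limit)
{
	if (size > limit)
		return true;
	return addr > limit - size;   /* overflow-safe, as in __chk_range_not_ok() */
}

/* New style: true means the range IS ok, limit baked in. */
static bool access_ok_model(uintptr_t addr, size_t size)
{
	return !range_not_ok(addr, size, LIMIT_MODEL);
}

int main(void)
{
	const uintptr_t samples[] = {
		0x1000, LIMIT_MODEL - 8, LIMIT_MODEL + 8, UINTPTR_MAX - 4
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* The old and new spellings must always agree. */
		assert((range_not_ok(samples[i], 16, LIMIT_MODEL) == 0) ==
		       (access_ok_model(samples[i], 16) != 0));
		printf("%#lx: %s\n", (unsigned long)samples[i],
		       access_ok_model(samples[i], 16) ? "ok" : "not ok");
	}
	return 0;
}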