From: Greg Kroah-Hartman
Date: Tue, 10 Sep 2024 07:36:53 +0000 (+0200)
Subject: 5.15-stable patches
X-Git-Tag: v4.19.322~29
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=3840363eff2057c32b2ee8de2cafd0dde331f8aa;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
	x86-mm-fix-pti-for-i386-some-more.patch
---

diff --git a/queue-5.15/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch b/queue-5.15/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
new file mode 100644
index 00000000000..2c976a7366b
--- /dev/null
+++ b/queue-5.15/net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
@@ -0,0 +1,59 @@
+From 626dfed5fa3bfb41e0dffd796032b555b69f9cde Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann
+Date: Thu, 4 Jul 2024 08:41:57 +0200
+Subject: net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket
+
+From: Daniel Borkmann
+
+commit 626dfed5fa3bfb41e0dffd796032b555b69f9cde upstream.
+
+When using a BPF program on kernel_connect(), the call can return -EPERM. This
+causes xs_tcp_setup_socket() to loop forever, filling up the syslog and causing
+the kernel to potentially freeze up.
+
+Neil suggested:
+
+  This will propagate -EPERM up into other layers which might not be ready
+  to handle it. It might be safer to map EPERM to an error we would be more
+  likely to expect from the network system - such as ECONNREFUSED or ENETDOWN.
+
+ECONNREFUSED as error seems reasonable. For programs setting a different error
+can be out of reach (see handling in 4fbac77d2d09) in particular on kernels
+which do not have f10d05966196 ("bpf: Make BPF_PROG_RUN_ARRAY return -err
+instead of allow boolean"), thus given that it is better to simply remap for
+consistent behavior. UDP does handle EPERM in xs_udp_send_request().
+
+Fixes: d74bad4e74ee ("bpf: Hooks for sys_connect")
+Fixes: 4fbac77d2d09 ("bpf: Hooks for sys_bind")
+Co-developed-by: Lex Siegel
+Signed-off-by: Lex Siegel
+Signed-off-by: Daniel Borkmann
+Cc: Neil Brown
+Cc: Trond Myklebust
+Cc: Anna Schumaker
+Link: https://github.com/cilium/cilium/issues/33395
+Link: https://lore.kernel.org/bpf/171374175513.12877.8993642908082014881@noble.neil.brown.name
+Link: https://patch.msgid.link/9069ec1d59e4b2129fc23433349fd5580ad43921.1720075070.git.daniel@iogearbox.net
+Signed-off-by: Paolo Abeni
+Signed-off-by: Hugo SIMELIERE
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/sunrpc/xprtsock.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2335,6 +2335,13 @@ static void xs_tcp_setup_socket(struct w
+ 	case -EALREADY:
+ 		xprt_unlock_connect(xprt, transport);
+ 		return;
++	case -EPERM:
++		/* Happens, for instance, if a BPF program is preventing
++		 * the connect. Remap the error so upper layers can better
++		 * deal with it.
++		 */
++		status = -ECONNREFUSED;
++		fallthrough;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+ 		 * local IPv6 address without a scope-id.
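For context on how the -EPERM above arises: a cgroup BPF program attached to the connect hook can veto a connection attempt, and the rejected kernel_connect() then surfaces -EPERM to the RPC transport. The sketch below is a minimal, hypothetical example, not part of the queued patches; the program name and attachment are illustrative and it assumes libbpf's bpf_helpers.h is available.

/* Hypothetical illustration, not from the patch: a cgroup/connect4 program
 * that rejects every IPv4 connect in its cgroup. A TCP transport running
 * under that cgroup sees kernel_connect() fail with -EPERM, which is the
 * error xs_tcp_setup_socket() now remaps to -ECONNREFUSED.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/connect4")
int deny_connect4(struct bpf_sock_addr *ctx)
{
	/* 0 = reject the connect (caller observes -EPERM), 1 = allow it. */
	return 0;
}

char _license[] SEC("license") = "GPL";

Attached to a cgroup (e.g. via bpftool cgroup attach with the connect4 attach type), this reproduces the endless reconnect loop on kernels that lack the remap above.
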
diff --git a/queue-5.15/series b/queue-5.15/series
index 236d6ffcb40..35a86c809c7 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -208,3 +208,5 @@ net-more-strict-virtio_net_hdr_gso_udp_l4-validation.patch
 net-change-maximum-number-of-udp-segments-to-128.patch
 gso-fix-dodgy-bit-handling-for-gso_udp_l4.patch
 net-drop-bad-gso-csum_start-and-offset-in-virtio_net_hdr.patch
+x86-mm-fix-pti-for-i386-some-more.patch
+net-sunrpc-remap-eperm-in-case-of-connection-failure-in-xs_tcp_setup_socket.patch
diff --git a/queue-5.15/x86-mm-fix-pti-for-i386-some-more.patch b/queue-5.15/x86-mm-fix-pti-for-i386-some-more.patch
new file mode 100644
index 00000000000..8f14ca22380
--- /dev/null
+++ b/queue-5.15/x86-mm-fix-pti-for-i386-some-more.patch
@@ -0,0 +1,160 @@
+From c48b5a4cf3125adb679e28ef093f66ff81368d05 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 6 Aug 2024 20:48:43 +0200
+Subject: x86/mm: Fix PTI for i386 some more
+
+From: Thomas Gleixner
+
+commit c48b5a4cf3125adb679e28ef093f66ff81368d05 upstream.
+
+So it turns out that we have to do two passes of
+pti_clone_entry_text(), once before initcalls, such that device and
+late initcalls can use user-mode-helper / modprobe and once after
+free_initmem() / mark_readonly().
+
+Now obviously mark_readonly() can cause PMD splits, and
+pti_clone_pgtable() doesn't like that much.
+
+Allow the late clone to split PMDs so that pagetables stay in sync.
+
+[peterz: Changelog and comments]
+Reported-by: Guenter Roeck
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Peter Zijlstra (Intel)
+Tested-by: Guenter Roeck
+Link: https://lkml.kernel.org/r/20240806184843.GX37996@noisy.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/mm/pti.c | 45 +++++++++++++++++++++++++++++----------------
+ 1 file changed, 29 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pm
+  *
+  * Returns a pointer to a PTE on success, or NULL on failure.
+  */
+-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
++static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
+ {
+ 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ 	pmd_t *pmd;
+@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pt
+ 	if (!pmd)
+ 		return NULL;
+ 
+-	/* We can't do anything sensible if we hit a large mapping. */
++	/* Large PMD mapping found */
+ 	if (pmd_large(*pmd)) {
+-		WARN_ON(1);
+-		return NULL;
++		/* Clear the PMD if we hit a large mapping from the first round */
++		if (late_text) {
++			set_pmd(pmd, __pmd(0));
++		} else {
++			WARN_ON_ONCE(1);
++			return NULL;
++		}
+ 	}
+ 
+ 	if (pmd_none(*pmd)) {
+@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(vo
+ 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+ 		return;
+ 
+-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
++	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
+ 	if (WARN_ON(!target_pte))
+ 		return;
+ 
+@@ -301,7 +306,7 @@ enum pti_clone_level {
+ 
+ static void
+ pti_clone_pgtable(unsigned long start, unsigned long end,
+-		  enum pti_clone_level level)
++		  enum pti_clone_level level, bool late_text)
+ {
+ 	unsigned long addr;
+ 
+@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, u
+ 				return;
+ 
+ 			/* Allocate PTE in the user page-table */
+-			target_pte = pti_user_pagetable_walk_pte(addr);
++			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
+ 			if (WARN_ON(!target_pte))
+ 				return;
+ 
+@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared
+ 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+ 		pte_t *target_pte;
+ 
+-		target_pte = pti_user_pagetable_walk_pte(va);
++		target_pte = pti_user_pagetable_walk_pte(va, false);
+ 		if (WARN_ON(!target_pte))
+ 			return;
+ 
+@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared
+ 	start = CPU_ENTRY_AREA_BASE;
+ 	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+ 
+-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
++	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
+ }
+ #endif /* CONFIG_X86_64 */
+ 
+@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(vo
+ /*
+  * Clone the populated PMDs of the entry text and force it RO.
+  */
+-static void pti_clone_entry_text(void)
++static void pti_clone_entry_text(bool late)
+ {
+ 	pti_clone_pgtable((unsigned long) __entry_text_start,
+ 			  (unsigned long) __entry_text_end,
+-			  PTI_LEVEL_KERNEL_IMAGE);
++			  PTI_LEVEL_KERNEL_IMAGE, late);
+ }
+ 
+ /*
+@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
+ 	 * pti_set_kernel_image_nonglobal() did to clear the
+ 	 * global bit.
+ 	 */
+-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
++	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
+ 
+ 	/*
+ 	 * pti_clone_pgtable() will set the global bit in any PMDs
+@@ -638,8 +643,15 @@ void __init pti_init(void)
+ 
+ 	/* Undo all global bits from the init pagetables in head_64.S: */
+ 	pti_set_kernel_image_nonglobal();
++
+ 	/* Replace some of the global bits just for shared entry text: */
++	/*
++	 * This is very early in boot. Device and Late initcalls can do
++	 * modprobe before free_initmem() and mark_readonly(). This
++	 * pti_clone_entry_text() allows those user-mode-helpers to function,
++	 * but notably the text is still RW.
++	 */
+-	pti_clone_entry_text();
++	pti_clone_entry_text(false);
+ 	pti_setup_espfix64();
+ 	pti_setup_vsyscall();
+ }
+@@ -656,10 +668,11 @@ void pti_finalize(void)
+ 	if (!boot_cpu_has(X86_FEATURE_PTI))
+ 		return;
+ 	/*
+-	 * We need to clone everything (again) that maps parts of the
+-	 * kernel image.
++	 * This is after free_initmem() (all initcalls are done) and we've done
++	 * mark_readonly(). Text is now NX which might've split some PMDs
++	 * relative to the early clone.
+ 	 */
+-	pti_clone_entry_text();
++	pti_clone_entry_text(true);
+ 	pti_clone_kernel_text();
+ 
+ 	debug_checkwx_user();
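To make the ordering described in the changelog above concrete, here is a small standalone model; it is an illustration only and neither calls nor mirrors real kernel interfaces. The early pass from pti_init() runs before initcalls and treats a large user-side PMD as a bug, while the late pass from pti_finalize() runs after free_initmem()/mark_readonly(), where PMD splits are expected, so it clears the stale PMD and re-clones at PTE granularity.

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the new late_text decision in
 * pti_user_pagetable_walk_pte(); illustrative userspace code only. */
static bool handle_large_pmd(bool late_text)
{
	if (!late_text) {
		/* Early clone (pti_init(), before initcalls): a large PMD in
		 * the user page-table is unexpected, so warn and abort. */
		fprintf(stderr, "WARN: unexpected large PMD, aborting clone\n");
		return false;
	}
	/* Late clone (pti_finalize(), after mark_readonly()): the early
	 * PMD-level clone may be stale, so clear it and repopulate with
	 * 4K PTEs. */
	printf("clearing stale large PMD, re-cloning at PTE granularity\n");
	return true;
}

int main(void)
{
	handle_large_pmd(false);	/* models the pti_init() pass */
	handle_large_pmd(true);		/* models the pti_finalize() pass */
	return 0;
}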