From: Greg Kroah-Hartman Date: Tue, 17 Dec 2024 14:12:15 +0000 (+0100) Subject: 5.15-stable patches X-Git-Tag: v5.4.288~16 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=c34b4b5488b890be2433856a1f16add1c2bd9905;p=thirdparty%2Fkernel%2Fstable-queue.git 5.15-stable patches added patches: objtool-x86-allow-syscall-instruction.patch x86-make-get_cpu_vendor-accessible-from-xen-code.patch x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch x86-xen-add-central-hypercall-functions.patch x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch x86-xen-remove-hypercall-page.patch x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch xen-netfront-fix-crash-when-removing-device.patch --- diff --git a/queue-5.15/objtool-x86-allow-syscall-instruction.patch b/queue-5.15/objtool-x86-allow-syscall-instruction.patch new file mode 100644 index 00000000000..6f5128a7d3a --- /dev/null +++ b/queue-5.15/objtool-x86-allow-syscall-instruction.patch @@ -0,0 +1,43 @@ +From 40e992abff21d1ee0f83c1221412b4759f82b3eb Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Fri, 29 Nov 2024 15:47:49 +0100 +Subject: objtool/x86: allow syscall instruction + +From: Juergen Gross + +commit dda014ba59331dee4f3b773a020e109932f4bd24 upstream. + +The syscall instruction is used in Xen PV mode for doing hypercalls. +Allow syscall to be used in the kernel in case it is tagged with an +unwind hint for objtool. + +This is part of XSA-466 / CVE-2024-53241. + +Reported-by: Andrew Cooper +Signed-off-by: Juergen Gross +Co-developed-by: Peter Zijlstra +Signed-off-by: Greg Kroah-Hartman +--- + tools/objtool/check.c | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -3206,10 +3206,13 @@ static int validate_branch(struct objtoo + break; + + case INSN_CONTEXT_SWITCH: +- if (func && (!next_insn || !next_insn->hint)) { +- WARN_FUNC("unsupported instruction in callable function", +- sec, insn->offset); +- return 1; ++ if (func) { ++ if (!next_insn || !next_insn->hint) { ++ WARN_FUNC("unsupported instruction in callable function", ++ sec, insn->offset); ++ return 1; ++ } ++ break; + } + return 0; + diff --git a/queue-5.15/series b/queue-5.15/series index b77e7a6b2b3..45f080cc979 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -40,3 +40,11 @@ blk-iocost-avoid-using-clamp-on-inuse-in-__propagate.patch bpf-sync_linked_regs-must-preserve-subreg_def.patch tracing-kprobes-skip-symbol-counting-logic-for-module-symbols-in-create_local_trace_kprobe.patch revert-parisc-fix-a-possible-dma-corruption.patch +xen-netfront-fix-crash-when-removing-device.patch +x86-make-get_cpu_vendor-accessible-from-xen-code.patch +objtool-x86-allow-syscall-instruction.patch +x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch +x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch +x86-xen-add-central-hypercall-functions.patch +x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch +x86-xen-remove-hypercall-page.patch diff --git a/queue-5.15/x86-make-get_cpu_vendor-accessible-from-xen-code.patch b/queue-5.15/x86-make-get_cpu_vendor-accessible-from-xen-code.patch new file mode 100644 index 00000000000..92dcfbf349b --- /dev/null +++ b/queue-5.15/x86-make-get_cpu_vendor-accessible-from-xen-code.patch @@ -0,0 +1,105 @@ +From 42b79b2d7f411e4187a63b0c76142784310d74d6 Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Thu, 17 Oct 2024 08:29:48 +0200 +Subject: x86: make get_cpu_vendor() accessible from Xen 
code + +From: Juergen Gross + +commit efbcd61d9bebb771c836a3b8bfced8165633db7c upstream. + +In order to be able to differentiate between AMD and Intel based +systems for very early hypercalls without having to rely on the Xen +hypercall page, make get_cpu_vendor() non-static. + +Refactor early_cpu_init() for the same reason by splitting out the +loop initializing cpu_devs() into an externally callable function. + +This is part of XSA-466 / CVE-2024-53241. + +Reported-by: Andrew Cooper +Signed-off-by: Juergen Gross +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/processor.h | 2 ++ + arch/x86/kernel/cpu/common.c | 36 +++++++++++++++++++++--------------- + 2 files changed, 23 insertions(+), 15 deletions(-) + +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -196,6 +196,8 @@ static inline unsigned long long l1tf_pf + return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); + } + ++void init_cpu_devs(void); ++void get_cpu_vendor(struct cpuinfo_x86 *c); + extern void early_cpu_init(void); + extern void identify_boot_cpu(void); + extern void identify_secondary_cpu(struct cpuinfo_x86 *); +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -798,7 +798,7 @@ void detect_ht(struct cpuinfo_x86 *c) + #endif + } + +-static void get_cpu_vendor(struct cpuinfo_x86 *c) ++void get_cpu_vendor(struct cpuinfo_x86 *c) + { + char *v = c->x86_vendor_id; + int i; +@@ -1522,15 +1522,11 @@ static void __init early_identify_cpu(st + detect_nopl(); + } + +-void __init early_cpu_init(void) ++void __init init_cpu_devs(void) + { + const struct cpu_dev *const *cdev; + int count = 0; + +-#ifdef CONFIG_PROCESSOR_SELECT +- pr_info("KERNEL supported cpus:\n"); +-#endif +- + for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { + const struct cpu_dev *cpudev = *cdev; + +@@ -1538,20 +1534,30 @@ void __init early_cpu_init(void) + break; + cpu_devs[count] = cpudev; + count++; ++ } ++} + ++void __init early_cpu_init(void) ++{ + #ifdef CONFIG_PROCESSOR_SELECT +- { +- unsigned int j; ++ unsigned int i, j; + +- for (j = 0; j < 2; j++) { +- if (!cpudev->c_ident[j]) +- continue; +- pr_info(" %s %s\n", cpudev->c_vendor, +- cpudev->c_ident[j]); +- } +- } ++ pr_info("KERNEL supported cpus:\n"); + #endif ++ ++ init_cpu_devs(); ++ ++#ifdef CONFIG_PROCESSOR_SELECT ++ for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) { ++ for (j = 0; j < 2; j++) { ++ if (!cpu_devs[i]->c_ident[j]) ++ continue; ++ pr_info(" %s %s\n", cpu_devs[i]->c_vendor, ++ cpu_devs[i]->c_ident[j]); ++ } + } ++#endif ++ + early_identify_cpu(&boot_cpu_data); + } + diff --git a/queue-5.15/x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch b/queue-5.15/x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch new file mode 100644 index 00000000000..a41c84cbf79 --- /dev/null +++ b/queue-5.15/x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch @@ -0,0 +1,178 @@ +From 53305b6dfa3c6177a257144427baa3ab5deb229d Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Fri, 29 Nov 2024 16:15:54 +0100 +Subject: x86/static-call: provide a way to do very early static-call updates + +From: Juergen Gross + +commit 0ef8047b737d7480a5d4c46d956e97c190f13050 upstream. + +Add static_call_update_early() for updating static-call targets in +very early boot. + +This will be needed for support of Xen guest type specific hypercall +functions. + +This is part of XSA-466 / CVE-2024-53241. 
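+
+As a usage sketch only (the static call "my_hc" and its targets are
+hypothetical, not part of this patch):
+
+	#include <linux/static_call.h>
+
+	/* Hypothetical targets standing in for real functions. */
+	static void hc_default(void) { }
+	static void hc_vendor(void) { }
+
+	DEFINE_STATIC_CALL(my_hc, hc_default);
+
+	void __init select_hc_early(void)
+	{
+		/* Before static_call_init(): patches the trampoline
+		 * directly; afterwards this falls back to a regular
+		 * __static_call_update().
+		 */
+		static_call_update_early(my_hc, hc_vendor);
+	}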
+ +Reported-by: Andrew Cooper +Signed-off-by: Juergen Gross +Co-developed-by: Peter Zijlstra +Co-developed-by: Josh Poimboeuf +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/static_call.h | 15 +++++++++++++++ + arch/x86/include/asm/sync_core.h | 6 +++--- + arch/x86/kernel/static_call.c | 10 ++++++++++ + include/linux/compiler.h | 32 +++++++++++++++++++++++--------- + include/linux/static_call.h | 1 + + kernel/static_call_inline.c | 2 +- + 6 files changed, 53 insertions(+), 13 deletions(-) + +--- a/arch/x86/include/asm/static_call.h ++++ b/arch/x86/include/asm/static_call.h +@@ -61,4 +61,19 @@ + + extern bool __static_call_fixup(void *tramp, u8 op, void *dest); + ++extern void __static_call_update_early(void *tramp, void *func); ++ ++#define static_call_update_early(name, _func) \ ++({ \ ++ typeof(&STATIC_CALL_TRAMP(name)) __F = (_func); \ ++ if (static_call_initialized) { \ ++ __static_call_update(&STATIC_CALL_KEY(name), \ ++ STATIC_CALL_TRAMP_ADDR(name), __F);\ ++ } else { \ ++ WRITE_ONCE(STATIC_CALL_KEY(name).func, _func); \ ++ __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\ ++ __F); \ ++ } \ ++}) ++ + #endif /* _ASM_STATIC_CALL_H */ +--- a/arch/x86/include/asm/sync_core.h ++++ b/arch/x86/include/asm/sync_core.h +@@ -8,7 +8,7 @@ + #include + + #ifdef CONFIG_X86_32 +-static inline void iret_to_self(void) ++static __always_inline void iret_to_self(void) + { + asm volatile ( + "pushfl\n\t" +@@ -19,7 +19,7 @@ static inline void iret_to_self(void) + : ASM_CALL_CONSTRAINT : : "memory"); + } + #else +-static inline void iret_to_self(void) ++static __always_inline void iret_to_self(void) + { + unsigned int tmp; + +@@ -55,7 +55,7 @@ static inline void iret_to_self(void) + * Like all of Linux's memory ordering operations, this is a + * compiler barrier as well. + */ +-static inline void sync_core(void) ++static __always_inline void sync_core(void) + { + /* + * The SERIALIZE instruction is the most straightforward way to +--- a/arch/x86/kernel/static_call.c ++++ b/arch/x86/kernel/static_call.c +@@ -2,6 +2,7 @@ + #include + #include + #include ++#include + #include + + enum insn_type { +@@ -165,6 +166,15 @@ void arch_static_call_transform(void *si + } + EXPORT_SYMBOL_GPL(arch_static_call_transform); + ++noinstr void __static_call_update_early(void *tramp, void *func) ++{ ++ BUG_ON(system_state != SYSTEM_BOOTING); ++ BUG_ON(!early_boot_irqs_disabled); ++ BUG_ON(static_call_initialized); ++ __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE); ++ sync_core(); ++} ++ + #ifdef CONFIG_RETHUNK + /* + * This is called by apply_returns() to fix up static call trampolines, +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -222,6 +222,23 @@ void ftrace_likely_update(struct ftrace_ + + #endif /* __KERNEL__ */ + ++/** ++ * offset_to_ptr - convert a relative memory offset to an absolute pointer ++ * @off: the address of the 32-bit offset value ++ */ ++static inline void *offset_to_ptr(const int *off) ++{ ++ return (void *)((unsigned long)off + *off); ++} ++ ++#endif /* __ASSEMBLY__ */ ++ ++#ifdef CONFIG_64BIT ++#define ARCH_SEL(a,b) a ++#else ++#define ARCH_SEL(a,b) b ++#endif ++ + /* + * Force the compiler to emit 'sym' as a symbol, so that we can reference + * it from inline assembler. 
Necessary in case 'sym' could be inlined +@@ -232,16 +249,13 @@ void ftrace_likely_update(struct ftrace_ + static void * __section(".discard.addressable") __used \ + __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym; + +-/** +- * offset_to_ptr - convert a relative memory offset to an absolute pointer +- * @off: the address of the 32-bit offset value +- */ +-static inline void *offset_to_ptr(const int *off) +-{ +- return (void *)((unsigned long)off + *off); +-} ++#define __ADDRESSABLE_ASM(sym) \ ++ .pushsection .discard.addressable,"aw"; \ ++ .align ARCH_SEL(8,4); \ ++ ARCH_SEL(.quad, .long) __stringify(sym); \ ++ .popsection; + +-#endif /* __ASSEMBLY__ */ ++#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym)) + + /* &a[0] degrades to a pointer: a different type from an array */ + #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +--- a/include/linux/static_call.h ++++ b/include/linux/static_call.h +@@ -138,6 +138,7 @@ + #ifdef CONFIG_HAVE_STATIC_CALL + #include + ++extern bool static_call_initialized; + /* + * Either @site or @tramp can be NULL. + */ +--- a/kernel/static_call_inline.c ++++ b/kernel/static_call_inline.c +@@ -15,7 +15,7 @@ extern struct static_call_site __start_s + extern struct static_call_tramp_key __start_static_call_tramp_key[], + __stop_static_call_tramp_key[]; + +-static bool static_call_initialized; ++bool static_call_initialized; + + /* mutex to protect key modules/sites */ + static DEFINE_MUTEX(static_call_mutex); diff --git a/queue-5.15/x86-xen-add-central-hypercall-functions.patch b/queue-5.15/x86-xen-add-central-hypercall-functions.patch new file mode 100644 index 00000000000..fd987314c2c --- /dev/null +++ b/queue-5.15/x86-xen-add-central-hypercall-functions.patch @@ -0,0 +1,323 @@ +From 6aa1ff5e6ada619bf475c1464867a92c0412c98f Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Thu, 17 Oct 2024 11:00:52 +0200 +Subject: x86/xen: add central hypercall functions + +From: Juergen Gross + +commit b4845bb6383821a9516ce30af3a27dc873e37fd4 upstream. + +Add generic hypercall functions usable for all normal (i.e. not iret) +hypercalls. Depending on the guest type and the processor vendor +different functions need to be used due to the to be used instruction +for entering the hypervisor: + +- PV guests need to use syscall +- HVM/PVH guests on Intel need to use vmcall +- HVM/PVH guests on AMD and Hygon need to use vmmcall + +As PVH guests need to issue hypercalls very early during boot, there +is a 4th hypercall function needed for HVM/PVH which can be used on +Intel and AMD processors. It will check the vendor type and then set +the Intel or AMD specific function to use via static_call(). + +This is part of XSA-466 / CVE-2024-53241. 
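+
+As a condensed sketch of the vendor selection for HVM/PVH guests (the
+real implementation is xen_hypercall_setfunc() in the diff below):
+
+	/* Illustration only: pick the vendor-specific entry point. */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		static_call_update(xen_hypercall, xen_hypercall_amd);	/* vmmcall */
+	else
+		static_call_update(xen_hypercall, xen_hypercall_intel);	/* vmcall */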
+ +Reported-by: Andrew Cooper +Signed-off-by: Juergen Gross +Co-developed-by: Peter Zijlstra +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/xen/hypercall.h | 3 + + arch/x86/xen/enlighten.c | 65 +++++++++++++++++++++++++++ + arch/x86/xen/enlighten_hvm.c | 4 + + arch/x86/xen/enlighten_pv.c | 4 + + arch/x86/xen/xen-asm.S | 22 +++++++++ + arch/x86/xen/xen-head.S | 82 +++++++++++++++++++++++++++++++++++ + arch/x86/xen/xen-ops.h | 9 +++ + 7 files changed, 188 insertions(+), 1 deletion(-) + +--- a/arch/x86/include/asm/xen/hypercall.h ++++ b/arch/x86/include/asm/xen/hypercall.h +@@ -88,6 +88,9 @@ struct xen_dm_op_buf; + + extern struct { char _entry[32]; } hypercall_page[]; + ++void xen_hypercall_func(void); ++DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func); ++ + #define __HYPERCALL "call hypercall_page+%c[offset]" + #define __HYPERCALL_ENTRY(x) \ + [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0])) +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -5,6 +5,7 @@ + #endif + #include + #include ++#include + #include + #include + #include +@@ -27,6 +28,9 @@ + + EXPORT_SYMBOL_GPL(hypercall_page); + ++DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm); ++EXPORT_STATIC_CALL_TRAMP(xen_hypercall); ++ + /* + * Pointer to the xen_vcpu_info structure or + * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info +@@ -99,6 +103,67 @@ struct shared_info *HYPERVISOR_shared_in + */ + int xen_have_vcpu_info_placement = 1; + ++static __ref void xen_get_vendor(void) ++{ ++ init_cpu_devs(); ++ cpu_detect(&boot_cpu_data); ++ get_cpu_vendor(&boot_cpu_data); ++} ++ ++void xen_hypercall_setfunc(void) ++{ ++ if (static_call_query(xen_hypercall) != xen_hypercall_hvm) ++ return; ++ ++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD || ++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) ++ static_call_update(xen_hypercall, xen_hypercall_amd); ++ else ++ static_call_update(xen_hypercall, xen_hypercall_intel); ++} ++ ++/* ++ * Evaluate processor vendor in order to select the correct hypercall ++ * function for HVM/PVH guests. ++ * Might be called very early in boot before vendor has been set by ++ * early_cpu_init(). ++ */ ++noinstr void *__xen_hypercall_setfunc(void) ++{ ++ void (*func)(void); ++ ++ /* ++ * Xen is supported only on CPUs with CPUID, so testing for ++ * X86_FEATURE_CPUID is a test for early_cpu_init() having been ++ * run. ++ * ++ * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty ++ * dependency chain: it is being called via the xen_hypercall static ++ * call when running as a PVH or HVM guest. Hypercalls need to be ++ * noinstr due to PV guests using hypercalls in noinstr code. So we ++ * can safely tag the function body as "instrumentation ok", since ++ * the PV guest requirement is not of interest here (xen_get_vendor() ++ * calls noinstr functions, and static_call_update_early() might do ++ * so, too). 
++ */ ++ instrumentation_begin(); ++ ++ if (!boot_cpu_has(X86_FEATURE_CPUID)) ++ xen_get_vendor(); ++ ++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD || ++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) ++ func = xen_hypercall_amd; ++ else ++ func = xen_hypercall_intel; ++ ++ static_call_update_early(xen_hypercall, func); ++ ++ instrumentation_end(); ++ ++ return func; ++} ++ + static int xen_cpu_up_online(unsigned int cpu) + { + xen_init_lock_cpu(cpu); +--- a/arch/x86/xen/enlighten_hvm.c ++++ b/arch/x86/xen/enlighten_hvm.c +@@ -284,6 +284,10 @@ static uint32_t __init xen_platform_hvm( + if (xen_pv_domain()) + return 0; + ++ /* Set correct hypercall function. */ ++ if (xen_domain) ++ xen_hypercall_setfunc(); ++ + if (xen_pvh_domain() && nopv) { + /* Guest booting via the Xen-PVH boot entry goes here */ + pr_info("\"nopv\" parameter is ignored in PVH guest\n"); +--- a/arch/x86/xen/enlighten_pv.c ++++ b/arch/x86/xen/enlighten_pv.c +@@ -1207,6 +1207,9 @@ asmlinkage __visible void __init xen_sta + + xen_domain_type = XEN_PV_DOMAIN; + xen_start_flags = xen_start_info->flags; ++ /* Interrupts are guaranteed to be off initially. */ ++ early_boot_irqs_disabled = true; ++ static_call_update_early(xen_hypercall, xen_hypercall_pv); + + xen_setup_features(); + +@@ -1304,7 +1307,6 @@ asmlinkage __visible void __init xen_sta + WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); + + local_irq_disable(); +- early_boot_irqs_disabled = true; + + xen_raw_console_write("mapping kernel into physical memory\n"); + xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, +--- a/arch/x86/xen/xen-asm.S ++++ b/arch/x86/xen/xen-asm.S +@@ -20,9 +20,31 @@ + + #include + #include ++#include + #include <../entry/calling.h> + + /* ++ * PV hypercall interface to the hypervisor. ++ * ++ * Called via inline asm(), so better preserve %rcx and %r11. ++ * ++ * Input: ++ * %eax: hypercall number ++ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall ++ * Output: %rax ++ */ ++SYM_FUNC_START(xen_hypercall_pv) ++ push %rcx ++ push %r11 ++ UNWIND_HINT_SAVE ++ syscall ++ UNWIND_HINT_RESTORE ++ pop %r11 ++ pop %rcx ++ RET ++SYM_FUNC_END(xen_hypercall_pv) ++ ++/* + * Enable events. This clears the event mask and tests the pending + * event status with one and operation. If there are pending events, + * then enter the hypervisor to get them handled. +--- a/arch/x86/xen/xen-head.S ++++ b/arch/x86/xen/xen-head.S +@@ -6,9 +6,11 @@ + + #include + #include ++#include + + #include + #include ++#include + #include + #include + #include +@@ -64,6 +66,86 @@ SYM_CODE_END(asm_cpu_bringup_and_idle) + #endif + #endif + ++ .pushsection .text ++/* ++ * Xen hypercall interface to the hypervisor. ++ * ++ * Input: ++ * %eax: hypercall number ++ * 32-bit: ++ * %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall ++ * 64-bit: ++ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall ++ * Output: %[er]ax ++ */ ++SYM_FUNC_START(xen_hypercall_hvm) ++ FRAME_BEGIN ++ /* Save all relevant registers (caller save and arguments). */ ++#ifdef CONFIG_X86_32 ++ push %eax ++ push %ebx ++ push %ecx ++ push %edx ++ push %esi ++ push %edi ++#else ++ push %rax ++ push %rcx ++ push %rdx ++ push %rdi ++ push %rsi ++ push %r11 ++ push %r10 ++ push %r9 ++ push %r8 ++#ifdef CONFIG_FRAME_POINTER ++ pushq $0 /* Dummy push for stack alignment. */ ++#endif ++#endif ++ /* Set the vendor specific function. */ ++ call __xen_hypercall_setfunc ++ /* Set ZF = 1 if AMD, Restore saved registers. 
*/ ++#ifdef CONFIG_X86_32 ++ lea xen_hypercall_amd, %ebx ++ cmp %eax, %ebx ++ pop %edi ++ pop %esi ++ pop %edx ++ pop %ecx ++ pop %ebx ++ pop %eax ++#else ++ lea xen_hypercall_amd(%rip), %rbx ++ cmp %rax, %rbx ++#ifdef CONFIG_FRAME_POINTER ++ pop %rax /* Dummy pop. */ ++#endif ++ pop %r8 ++ pop %r9 ++ pop %r10 ++ pop %r11 ++ pop %rsi ++ pop %rdi ++ pop %rdx ++ pop %rcx ++ pop %rax ++#endif ++ /* Use correct hypercall function. */ ++ jz xen_hypercall_amd ++ jmp xen_hypercall_intel ++SYM_FUNC_END(xen_hypercall_hvm) ++ ++SYM_FUNC_START(xen_hypercall_amd) ++ vmmcall ++ RET ++SYM_FUNC_END(xen_hypercall_amd) ++ ++SYM_FUNC_START(xen_hypercall_intel) ++ vmcall ++ RET ++SYM_FUNC_END(xen_hypercall_intel) ++ .popsection ++ + .pushsection .text + .balign PAGE_SIZE + SYM_CODE_START(hypercall_page) +--- a/arch/x86/xen/xen-ops.h ++++ b/arch/x86/xen/xen-ops.h +@@ -164,4 +164,13 @@ void xen_hvm_post_suspend(int suspend_ca + static inline void xen_hvm_post_suspend(int suspend_cancelled) {} + #endif + ++#ifdef CONFIG_XEN_PV ++void xen_hypercall_pv(void); ++#endif ++void xen_hypercall_hvm(void); ++void xen_hypercall_amd(void); ++void xen_hypercall_intel(void); ++void xen_hypercall_setfunc(void); ++void *__xen_hypercall_setfunc(void); ++ + #endif /* XEN_OPS_H */ diff --git a/queue-5.15/x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch b/queue-5.15/x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch new file mode 100644 index 00000000000..8c14196e433 --- /dev/null +++ b/queue-5.15/x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch @@ -0,0 +1,79 @@ +From 3333fd9135c27ae814f996fa9fc3fb09aa0754bf Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Wed, 16 Oct 2024 10:40:26 +0200 +Subject: x86/xen: don't do PV iret hypercall through hypercall page + +From: Juergen Gross + +commit a2796dff62d6c6bfc5fbebdf2bee0d5ac0438906 upstream. + +Instead of jumping to the Xen hypercall page for doing the iret +hypercall, directly code the required sequence in xen-asm.S. + +This is done in preparation of no longer using hypercall page at all, +as it has shown to cause problems with speculation mitigations. + +This is part of XSA-466 / CVE-2024-53241. + +Reported-by: Andrew Cooper +Signed-off-by: Juergen Gross +Reviewed-by: Jan Beulich +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/xen/xen-asm.S | 27 ++++++++++++++++++--------- + 1 file changed, 18 insertions(+), 9 deletions(-) + +--- a/arch/x86/xen/xen-asm.S ++++ b/arch/x86/xen/xen-asm.S +@@ -170,7 +170,6 @@ SYM_CODE_START(xen_early_idt_handler_arr + SYM_CODE_END(xen_early_idt_handler_array) + __FINIT + +-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 + /* + * Xen64 iret frame: + * +@@ -180,16 +179,27 @@ hypercall_iret = hypercall_page + __HYPE + * cs + * rip <-- standard iret frame + * +- * flags ++ * flags <-- xen_iret must push from here on + * +- * rcx } +- * r11 }<-- pushed by hypercall page +- * rsp->rax } ++ * rcx ++ * r11 ++ * rsp->rax + */ ++.macro xen_hypercall_iret ++ pushq $0 /* Flags */ ++ push %rcx ++ push %r11 ++ push %rax ++ mov $__HYPERVISOR_iret, %eax ++ syscall /* Do the IRET. 
*/ ++#ifdef CONFIG_MITIGATION_SLS ++ int3 ++#endif ++.endm ++ + SYM_CODE_START(xen_iret) + UNWIND_HINT_EMPTY +- pushq $0 +- jmp hypercall_iret ++ xen_hypercall_iret + SYM_CODE_END(xen_iret) + + /* +@@ -290,8 +300,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat + UNWIND_HINT_ENTRY + lea 16(%rsp), %rsp /* strip %rcx, %r11 */ + mov $-ENOSYS, %rax +- pushq $0 +- jmp hypercall_iret ++ xen_hypercall_iret + SYM_CODE_END(xen_entry_SYSENTER_compat) + SYM_CODE_END(xen_entry_SYSCALL_compat) + diff --git a/queue-5.15/x86-xen-remove-hypercall-page.patch b/queue-5.15/x86-xen-remove-hypercall-page.patch new file mode 100644 index 00000000000..17dc8206651 --- /dev/null +++ b/queue-5.15/x86-xen-remove-hypercall-page.patch @@ -0,0 +1,127 @@ +From ee55bbe2d3994a88dd8f64960731a8c5c85da39d Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Thu, 17 Oct 2024 15:27:31 +0200 +Subject: x86/xen: remove hypercall page + +From: Juergen Gross + +commit 7fa0da5373685e7ed249af3fa317ab1e1ba8b0a6 upstream. + +The hypercall page is no longer needed. It can be removed, as from the +Xen perspective it is optional. + +But, from Linux's perspective, it removes naked RET instructions that +escape the speculative protections that Call Depth Tracking and/or +Untrain Ret are trying to achieve. + +This is part of XSA-466 / CVE-2024-53241. + +Reported-by: Andrew Cooper +Signed-off-by: Juergen Gross +Reviewed-by: Andrew Cooper +Reviewed-by: Jan Beulich +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/xen/hypercall.h | 2 -- + arch/x86/xen/enlighten.c | 2 -- + arch/x86/xen/enlighten_hvm.c | 9 +-------- + arch/x86/xen/enlighten_pvh.c | 7 ------- + arch/x86/xen/xen-head.S | 19 ------------------- + 5 files changed, 1 insertion(+), 38 deletions(-) + +--- a/arch/x86/include/asm/xen/hypercall.h ++++ b/arch/x86/include/asm/xen/hypercall.h +@@ -88,8 +88,6 @@ struct xen_dm_op_buf; + * there aren't more than 5 arguments...) + */ + +-extern struct { char _entry[32]; } hypercall_page[]; +- + void xen_hypercall_func(void); + DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func); + +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -26,8 +26,6 @@ + #include "smp.h" + #include "pmu.h" + +-EXPORT_SYMBOL_GPL(hypercall_page); +- + DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm); + EXPORT_STATIC_CALL_TRAMP(xen_hypercall); + +--- a/arch/x86/xen/enlighten_hvm.c ++++ b/arch/x86/xen/enlighten_hvm.c +@@ -101,15 +101,8 @@ static void __init init_hvm_pv_info(void + /* PVH set up hypercall page in xen_prepare_pvh(). 
*/ + if (xen_pvh_domain()) + pv_info.name = "Xen PVH"; +- else { +- u64 pfn; +- uint32_t msr; +- ++ else + pv_info.name = "Xen HVM"; +- msr = cpuid_ebx(base + 2); +- pfn = __pa(hypercall_page); +- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); +- } + + xen_setup_features(); + +--- a/arch/x86/xen/enlighten_pvh.c ++++ b/arch/x86/xen/enlighten_pvh.c +@@ -27,17 +27,10 @@ EXPORT_SYMBOL_GPL(xen_pvh); + + void __init xen_pvh_init(struct boot_params *boot_params) + { +- u32 msr; +- u64 pfn; +- + xen_pvh = 1; + xen_domain_type = XEN_HVM_DOMAIN; + xen_start_flags = pvh_start_info.flags; + +- msr = cpuid_ebx(xen_cpuid_base() + 2); +- pfn = __pa(hypercall_page); +- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); +- + if (xen_initial_domain()) + x86_init.oem.arch_setup = xen_add_preferred_consoles; + x86_init.oem.banner = xen_banner; +--- a/arch/x86/xen/xen-head.S ++++ b/arch/x86/xen/xen-head.S +@@ -146,24 +146,6 @@ SYM_FUNC_START(xen_hypercall_intel) + SYM_FUNC_END(xen_hypercall_intel) + .popsection + +-.pushsection .text +- .balign PAGE_SIZE +-SYM_CODE_START(hypercall_page) +- .rept (PAGE_SIZE / 32) +- UNWIND_HINT_FUNC +- ANNOTATE_UNRET_SAFE +- ret +- .skip 31, 0xcc +- .endr +- +-#define HYPERCALL(n) \ +- .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ +- .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 +-#include +-#undef HYPERCALL +-SYM_CODE_END(hypercall_page) +-.popsection +- + ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") + ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") + ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") +@@ -177,7 +159,6 @@ SYM_CODE_END(hypercall_page) + #ifdef CONFIG_XEN_PV + ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen) + #endif +- ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page) + ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, + .ascii "!writable_page_tables|pae_pgdir_above_4gb") + ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, diff --git a/queue-5.15/x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch b/queue-5.15/x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch new file mode 100644 index 00000000000..8ebce626610 --- /dev/null +++ b/queue-5.15/x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch @@ -0,0 +1,118 @@ +From 3a2fe12cec3c5795b17610dd05bd0f8dd847df33 Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Thu, 17 Oct 2024 14:47:13 +0200 +Subject: x86/xen: use new hypercall functions instead of hypercall page + +From: Juergen Gross + +commit b1c2cb86f4a7861480ad54bb9a58df3cbebf8e92 upstream. + +Call the Xen hypervisor via the new xen_hypercall_func static-call +instead of the hypercall page. + +This is part of XSA-466 / CVE-2024-53241. 
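+
+As a rough before/after sketch of the emitted call sequence (argument
+register setup omitted):
+
+	/* Before: the hypercall number selects a 32-byte slot:
+	 *     call hypercall_page + __HYPERVISOR_<name> * 32
+	 * After: the number is passed in %eax and the static call
+	 * branches to xen_hypercall_{pv,amd,intel}:
+	 *     mov  $__HYPERVISOR_<name>, %eax
+	 *     call __SCT__xen_hypercall
+	 */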
+ +Reported-by: Andrew Cooper +Signed-off-by: Juergen Gross +Co-developed-by: Peter Zijlstra +Co-developed-by: Josh Poimboeuf +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/xen/hypercall.h | 33 ++++++++++++++++++++------------- + 1 file changed, 20 insertions(+), 13 deletions(-) + +--- a/arch/x86/include/asm/xen/hypercall.h ++++ b/arch/x86/include/asm/xen/hypercall.h +@@ -39,9 +39,11 @@ + #include + #include + #include ++#include + + #include + ++#include + #include + #include + #include +@@ -91,9 +93,17 @@ extern struct { char _entry[32]; } hyper + void xen_hypercall_func(void); + DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func); + +-#define __HYPERCALL "call hypercall_page+%c[offset]" +-#define __HYPERCALL_ENTRY(x) \ +- [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0])) ++#ifdef MODULE ++#define __ADDRESSABLE_xen_hypercall ++#else ++#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall) ++#endif ++ ++#define __HYPERCALL \ ++ __ADDRESSABLE_xen_hypercall \ ++ "call __SCT__xen_hypercall" ++ ++#define __HYPERCALL_ENTRY(x) "a" (x) + + #ifdef CONFIG_X86_32 + #define __HYPERCALL_RETREG "eax" +@@ -151,7 +161,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h + __HYPERCALL_0ARG(); \ + asm volatile (__HYPERCALL \ + : __HYPERCALL_0PARAM \ +- : __HYPERCALL_ENTRY(name) \ ++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ + : __HYPERCALL_CLOBBER0); \ + (type)__res; \ + }) +@@ -162,7 +172,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h + __HYPERCALL_1ARG(a1); \ + asm volatile (__HYPERCALL \ + : __HYPERCALL_1PARAM \ +- : __HYPERCALL_ENTRY(name) \ ++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ + : __HYPERCALL_CLOBBER1); \ + (type)__res; \ + }) +@@ -173,7 +183,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h + __HYPERCALL_2ARG(a1, a2); \ + asm volatile (__HYPERCALL \ + : __HYPERCALL_2PARAM \ +- : __HYPERCALL_ENTRY(name) \ ++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ + : __HYPERCALL_CLOBBER2); \ + (type)__res; \ + }) +@@ -184,7 +194,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h + __HYPERCALL_3ARG(a1, a2, a3); \ + asm volatile (__HYPERCALL \ + : __HYPERCALL_3PARAM \ +- : __HYPERCALL_ENTRY(name) \ ++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ + : __HYPERCALL_CLOBBER3); \ + (type)__res; \ + }) +@@ -195,7 +205,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h + __HYPERCALL_4ARG(a1, a2, a3, a4); \ + asm volatile (__HYPERCALL \ + : __HYPERCALL_4PARAM \ +- : __HYPERCALL_ENTRY(name) \ ++ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ + : __HYPERCALL_CLOBBER4); \ + (type)__res; \ + }) +@@ -209,12 +219,9 @@ xen_single_call(unsigned int call, + __HYPERCALL_DECLS; + __HYPERCALL_5ARG(a1, a2, a3, a4, a5); + +- if (call >= PAGE_SIZE / sizeof(hypercall_page[0])) +- return -EINVAL; +- +- asm volatile(CALL_NOSPEC ++ asm volatile(__HYPERCALL + : __HYPERCALL_5PARAM +- : [thunk_target] "a" (&hypercall_page[call]) ++ : __HYPERCALL_ENTRY(call) + : __HYPERCALL_CLOBBER5); + + return (long)__res; diff --git a/queue-5.15/xen-netfront-fix-crash-when-removing-device.patch b/queue-5.15/xen-netfront-fix-crash-when-removing-device.patch new file mode 100644 index 00000000000..5b5ed90cabf --- /dev/null +++ b/queue-5.15/xen-netfront-fix-crash-when-removing-device.patch @@ -0,0 +1,47 @@ +From 74c4410948e951456344000979eb3dc4351fadeb Mon Sep 17 00:00:00 2001 +From: Juergen Gross +Date: Thu, 7 Nov 2024 16:17:00 +0100 +Subject: xen/netfront: fix crash when removing device + +From: Juergen Gross + +commit f9244fb55f37356f75c739c57323d9422d7aa0f8 upstream. 
+
+When removing a netfront device directly after a suspend/resume cycle
+it might happen that the queues have not been set up again, causing a
+crash during the attempt to stop the queues another time.
+
+Fix that by checking that the queues exist before trying to stop
+them.
+
+This is XSA-465 / CVE-2024-53240.
+
+Reported-by: Marek Marczykowski-Górecki
+Fixes: d50b7914fae0 ("xen-netfront: Fix NULL sring after live migration")
+Signed-off-by: Juergen Gross
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/xen-netfront.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -869,7 +869,7 @@ static netdev_tx_t xennet_start_xmit(str
+ static int xennet_close(struct net_device *dev)
+ {
+ 	struct netfront_info *np = netdev_priv(dev);
+-	unsigned int num_queues = dev->real_num_tx_queues;
++	unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
+ 	unsigned int i;
+ 	struct netfront_queue *queue;
+ 	netif_tx_stop_all_queues(np->netdev);
+@@ -884,6 +884,9 @@ static void xennet_destroy_queues(struct
+ {
+ 	unsigned int i;
+ 
++	if (!info->queues)
++		return;
++
+ 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ 		struct netfront_queue *queue = &info->queues[i];
+