git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Dec 2024 14:10:16 +0000 (15:10 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Dec 2024 14:10:16 +0000 (15:10 +0100)
added patches:
objtool-x86-allow-syscall-instruction.patch
x86-make-get_cpu_vendor-accessible-from-xen-code.patch
x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch
x86-xen-add-central-hypercall-functions.patch
x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch
x86-xen-remove-hypercall-page.patch
x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch
xen-netfront-fix-crash-when-removing-device.patch

queue-6.1/objtool-x86-allow-syscall-instruction.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/x86-make-get_cpu_vendor-accessible-from-xen-code.patch [new file with mode: 0644]
queue-6.1/x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch [new file with mode: 0644]
queue-6.1/x86-xen-add-central-hypercall-functions.patch [new file with mode: 0644]
queue-6.1/x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch [new file with mode: 0644]
queue-6.1/x86-xen-remove-hypercall-page.patch [new file with mode: 0644]
queue-6.1/x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch [new file with mode: 0644]
queue-6.1/xen-netfront-fix-crash-when-removing-device.patch [new file with mode: 0644]

diff --git a/queue-6.1/objtool-x86-allow-syscall-instruction.patch b/queue-6.1/objtool-x86-allow-syscall-instruction.patch
new file mode 100644 (file)
index 0000000..1358774
--- /dev/null
@@ -0,0 +1,43 @@
+From bb2126a5c7ab919e66ac44c6d9a143a24fe3324f Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Fri, 29 Nov 2024 15:47:49 +0100
+Subject: objtool/x86: allow syscall instruction
+
+From: Juergen Gross <jgross@suse.com>
+
+commit dda014ba59331dee4f3b773a020e109932f4bd24 upstream.
+
+The syscall instruction is used in Xen PV mode for doing hypercalls.
+Allow syscall to be used in the kernel, provided it is tagged with an
+unwind hint for objtool.
+
+This is part of XSA-466 / CVE-2024-53241.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Co-developed-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/check.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -3521,10 +3521,13 @@ static int validate_branch(struct objtoo
+                       break;
+               case INSN_CONTEXT_SWITCH:
+-                      if (func && (!next_insn || !next_insn->hint)) {
+-                              WARN_FUNC("unsupported instruction in callable function",
+-                                        sec, insn->offset);
+-                              return 1;
++                      if (func) {
++                              if (!next_insn || !next_insn->hint) {
++                                      WARN_FUNC("unsupported instruction in callable function",
++                                                sec, insn->offset);
++                                      return 1;
++                              }
++                              break;
+                       }
+                       return 0;
diff --git a/queue-6.1/series b/queue-6.1/series
index 291ce0f5fafc33083faafff53ad08ebf4708dd5d..097bf6e4da1648d8640d9f1d8b404a0466178f3b 100644 (file)
@@ -66,3 +66,11 @@ bluetooth-sco-add-support-for-16-bits-transparent-vo.patch
 blk-iocost-avoid-using-clamp-on-inuse-in-__propagate.patch
 bpf-sync_linked_regs-must-preserve-subreg_def.patch
 tracing-kprobes-skip-symbol-counting-logic-for-module-symbols-in-create_local_trace_kprobe.patch
+xen-netfront-fix-crash-when-removing-device.patch
+x86-make-get_cpu_vendor-accessible-from-xen-code.patch
+objtool-x86-allow-syscall-instruction.patch
+x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch
+x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch
+x86-xen-add-central-hypercall-functions.patch
+x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch
+x86-xen-remove-hypercall-page.patch
diff --git a/queue-6.1/x86-make-get_cpu_vendor-accessible-from-xen-code.patch b/queue-6.1/x86-make-get_cpu_vendor-accessible-from-xen-code.patch
new file mode 100644 (file)
index 0000000..b50652c
--- /dev/null
@@ -0,0 +1,105 @@
+From ff875e7e93760ebecbbfcc8d3b67b0c325382c35 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 17 Oct 2024 08:29:48 +0200
+Subject: x86: make get_cpu_vendor() accessible from Xen code
+
+From: Juergen Gross <jgross@suse.com>
+
+commit efbcd61d9bebb771c836a3b8bfced8165633db7c upstream.
+
+In order to be able to differentiate between AMD and Intel based
+systems for very early hypercalls without having to rely on the Xen
+hypercall page, make get_cpu_vendor() non-static.
+
+Refactor early_cpu_init() for the same reason by splitting out the
+loop initializing cpu_devs() into an externally callable function.
+
+This is part of XSA-466 / CVE-2024-53241.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/processor.h |    2 ++
+ arch/x86/kernel/cpu/common.c     |   36 +++++++++++++++++++++---------------
+ 2 files changed, 23 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -199,6 +199,8 @@ static inline unsigned long long l1tf_pf
+       return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+ }
++void init_cpu_devs(void);
++void get_cpu_vendor(struct cpuinfo_x86 *c);
+ extern void early_cpu_init(void);
+ extern void identify_boot_cpu(void);
+ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -906,7 +906,7 @@ void detect_ht(struct cpuinfo_x86 *c)
+ #endif
+ }
+-static void get_cpu_vendor(struct cpuinfo_x86 *c)
++void get_cpu_vendor(struct cpuinfo_x86 *c)
+ {
+       char *v = c->x86_vendor_id;
+       int i;
+@@ -1672,15 +1672,11 @@ static void __init early_identify_cpu(st
+       detect_nopl();
+ }
+-void __init early_cpu_init(void)
++void __init init_cpu_devs(void)
+ {
+       const struct cpu_dev *const *cdev;
+       int count = 0;
+-#ifdef CONFIG_PROCESSOR_SELECT
+-      pr_info("KERNEL supported cpus:\n");
+-#endif
+-
+       for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+               const struct cpu_dev *cpudev = *cdev;
+@@ -1688,20 +1684,30 @@ void __init early_cpu_init(void)
+                       break;
+               cpu_devs[count] = cpudev;
+               count++;
++      }
++}
++void __init early_cpu_init(void)
++{
+ #ifdef CONFIG_PROCESSOR_SELECT
+-              {
+-                      unsigned int j;
++      unsigned int i, j;
+-                      for (j = 0; j < 2; j++) {
+-                              if (!cpudev->c_ident[j])
+-                                      continue;
+-                              pr_info("  %s %s\n", cpudev->c_vendor,
+-                                      cpudev->c_ident[j]);
+-                      }
+-              }
++      pr_info("KERNEL supported cpus:\n");
+ #endif
++
++      init_cpu_devs();
++
++#ifdef CONFIG_PROCESSOR_SELECT
++      for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
++              for (j = 0; j < 2; j++) {
++                      if (!cpu_devs[i]->c_ident[j])
++                              continue;
++                      pr_info("  %s %s\n", cpu_devs[i]->c_vendor,
++                              cpu_devs[i]->c_ident[j]);
++              }
+       }
++#endif
++
+       early_identify_cpu(&boot_cpu_data);
+ }
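
The exported init_cpu_devs() and get_cpu_vendor() are meant to be callable from Xen
code before early_cpu_init() has run. A minimal sketch of such an early caller, based
on the xen_get_vendor() helper added later in this series (the function name here is
illustrative only):

    #include <linux/init.h>
    #include <asm/processor.h>

    /* Sketch only: detect the CPU vendor before early_cpu_init() has run. */
    static void __ref early_vendor_detect_example(void)
    {
            init_cpu_devs();                /* fill cpu_devs[] from the vendor table */
            cpu_detect(&boot_cpu_data);     /* read the CPUID vendor string */
            get_cpu_vendor(&boot_cpu_data); /* translate it to X86_VENDOR_* */
    }
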
diff --git a/queue-6.1/x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch b/queue-6.1/x86-static-call-provide-a-way-to-do-very-early-static-call-updates.patch
new file mode 100644 (file)
index 0000000..f724b6f
--- /dev/null
@@ -0,0 +1,178 @@
+From 8bdf635124c201de9e06455df72988b2fe451b53 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Fri, 29 Nov 2024 16:15:54 +0100
+Subject: x86/static-call: provide a way to do very early static-call updates
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 0ef8047b737d7480a5d4c46d956e97c190f13050 upstream.
+
+Add static_call_update_early() for updating static-call targets in
+very early boot.
+
+This will be needed for supporting Xen guest-type-specific hypercall
+functions.
+
+This is part of XSA-466 / CVE-2024-53241.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Co-developed-by: Peter Zijlstra <peterz@infradead.org>
+Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/static_call.h |   15 +++++++++++++++
+ arch/x86/include/asm/sync_core.h   |    6 +++---
+ arch/x86/kernel/static_call.c      |    9 +++++++++
+ include/linux/compiler.h           |   37 ++++++++++++++++++++++++++-----------
+ include/linux/static_call.h        |    1 +
+ kernel/static_call_inline.c        |    2 +-
+ 6 files changed, 55 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/include/asm/static_call.h
++++ b/arch/x86/include/asm/static_call.h
+@@ -65,4 +65,19 @@
+ extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
++extern void __static_call_update_early(void *tramp, void *func);
++
++#define static_call_update_early(name, _func)                         \
++({                                                                    \
++      typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);                 \
++      if (static_call_initialized) {                                  \
++              __static_call_update(&STATIC_CALL_KEY(name),            \
++                                   STATIC_CALL_TRAMP_ADDR(name), __F);\
++      } else {                                                        \
++              WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);          \
++              __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
++                                         __F);                        \
++      }                                                               \
++})
++
+ #endif /* _ASM_STATIC_CALL_H */
+--- a/arch/x86/include/asm/sync_core.h
++++ b/arch/x86/include/asm/sync_core.h
+@@ -8,7 +8,7 @@
+ #include <asm/special_insns.h>
+ #ifdef CONFIG_X86_32
+-static inline void iret_to_self(void)
++static __always_inline void iret_to_self(void)
+ {
+       asm volatile (
+               "pushfl\n\t"
+@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
+               : ASM_CALL_CONSTRAINT : : "memory");
+ }
+ #else
+-static inline void iret_to_self(void)
++static __always_inline void iret_to_self(void)
+ {
+       unsigned int tmp;
+@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
+  * Like all of Linux's memory ordering operations, this is a
+  * compiler barrier as well.
+  */
+-static inline void sync_core(void)
++static __always_inline void sync_core(void)
+ {
+       /*
+        * The SERIALIZE instruction is the most straightforward way to
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -170,6 +170,15 @@ void arch_static_call_transform(void *si
+ }
+ EXPORT_SYMBOL_GPL(arch_static_call_transform);
++noinstr void __static_call_update_early(void *tramp, void *func)
++{
++      BUG_ON(system_state != SYSTEM_BOOTING);
++      BUG_ON(!early_boot_irqs_disabled);
++      BUG_ON(static_call_initialized);
++      __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
++      sync_core();
++}
++
+ #ifdef CONFIG_RETHUNK
+ /*
+  * This is called by apply_returns() to fix up static call trampolines,
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -205,28 +205,43 @@ void ftrace_likely_update(struct ftrace_
+ #endif /* __KERNEL__ */
++/**
++ * offset_to_ptr - convert a relative memory offset to an absolute pointer
++ * @off:      the address of the 32-bit offset value
++ */
++static inline void *offset_to_ptr(const int *off)
++{
++      return (void *)((unsigned long)off + *off);
++}
++
++#endif /* __ASSEMBLY__ */
++
++#ifdef CONFIG_64BIT
++#define ARCH_SEL(a,b) a
++#else
++#define ARCH_SEL(a,b) b
++#endif
++
+ /*
+  * Force the compiler to emit 'sym' as a symbol, so that we can reference
+  * it from inline assembler. Necessary in case 'sym' could be inlined
+  * otherwise, or eliminated entirely due to lack of references that are
+  * visible to the compiler.
+  */
+-#define ___ADDRESSABLE(sym, __attrs) \
+-      static void * __used __attrs \
++#define ___ADDRESSABLE(sym, __attrs)                                          \
++      static void * __used __attrs                                            \
+               __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
++
+ #define __ADDRESSABLE(sym) \
+       ___ADDRESSABLE(sym, __section(".discard.addressable"))
+-/**
+- * offset_to_ptr - convert a relative memory offset to an absolute pointer
+- * @off:      the address of the 32-bit offset value
+- */
+-static inline void *offset_to_ptr(const int *off)
+-{
+-      return (void *)((unsigned long)off + *off);
+-}
++#define __ADDRESSABLE_ASM(sym)                                                \
++      .pushsection .discard.addressable,"aw";                         \
++      .align ARCH_SEL(8,4);                                           \
++      ARCH_SEL(.quad, .long) __stringify(sym);                        \
++      .popsection;
+-#endif /* __ASSEMBLY__ */
++#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
+ /* &a[0] degrades to a pointer: a different type from an array */
+ #define __must_be_array(a)    BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -138,6 +138,7 @@
+ #ifdef CONFIG_HAVE_STATIC_CALL
+ #include <asm/static_call.h>
++extern bool static_call_initialized;
+ /*
+  * Either @site or @tramp can be NULL.
+  */
+--- a/kernel/static_call_inline.c
++++ b/kernel/static_call_inline.c
+@@ -15,7 +15,7 @@ extern struct static_call_site __start_s
+ extern struct static_call_tramp_key __start_static_call_tramp_key[],
+                                   __stop_static_call_tramp_key[];
+-static bool static_call_initialized;
++bool static_call_initialized;
+ /* mutex to protect key modules/sites */
+ static DEFINE_MUTEX(static_call_mutex);
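
With static_call_initialized exported and __static_call_update_early() in place, a
static call can be retargeted before static_call_init() has run. A hedged usage sketch
(the xen_hypercall static call and xen_hypercall_pv() are introduced by the Xen patches
further down in this series):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/static_call.h>

    /* Sketch: retarget a static call during very early boot. */
    void __init example_early_static_call_setup(void)
    {
            /* Interrupts are guaranteed to be off this early in boot. */
            early_boot_irqs_disabled = true;

            /*
             * Before static_call_init() has run this falls back to
             * __static_call_update_early(); afterwards it behaves like
             * a regular static_call_update().
             */
            static_call_update_early(xen_hypercall, xen_hypercall_pv);
    }
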
diff --git a/queue-6.1/x86-xen-add-central-hypercall-functions.patch b/queue-6.1/x86-xen-add-central-hypercall-functions.patch
new file mode 100644 (file)
index 0000000..15244e2
--- /dev/null
@@ -0,0 +1,326 @@
+From 5d50b7b42295bbcef08f954051fd92e0a2bcc27d Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 17 Oct 2024 11:00:52 +0200
+Subject: x86/xen: add central hypercall functions
+
+From: Juergen Gross <jgross@suse.com>
+
+commit b4845bb6383821a9516ce30af3a27dc873e37fd4 upstream.
+
+Add generic hypercall functions usable for all normal (i.e. not iret)
+hypercalls. Depending on the guest type and the processor vendor,
+different functions need to be used, because the instruction for
+entering the hypervisor differs:
+
+- PV guests need to use syscall
+- HVM/PVH guests on Intel need to use vmcall
+- HVM/PVH guests on AMD and Hygon need to use vmmcall
+
+As PVH guests need to issue hypercalls very early during boot, a
+fourth hypercall function is needed for HVM/PVH which can be used on
+both Intel and AMD processors. It checks the vendor type and then sets
+the Intel- or AMD-specific function to use via static_call().
+
+This is part of XSA-466 / CVE-2024-53241.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Co-developed-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/xen/hypercall.h |    3 +
+ arch/x86/xen/enlighten.c             |   65 +++++++++++++++++++++++++++
+ arch/x86/xen/enlighten_hvm.c         |    4 +
+ arch/x86/xen/enlighten_pv.c          |    4 +
+ arch/x86/xen/xen-asm.S               |   23 +++++++++
+ arch/x86/xen/xen-head.S              |   83 +++++++++++++++++++++++++++++++++++
+ arch/x86/xen/xen-ops.h               |    9 +++
+ 7 files changed, 190 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -88,6 +88,9 @@ struct xen_dm_op_buf;
+ extern struct { char _entry[32]; } hypercall_page[];
++void xen_hypercall_func(void);
++DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
++
+ #define __HYPERCALL           "call hypercall_page+%c[offset]"
+ #define __HYPERCALL_ENTRY(x)                                          \
+       [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -5,6 +5,7 @@
+ #endif
+ #include <linux/console.h>
+ #include <linux/cpu.h>
++#include <linux/instrumentation.h>
+ #include <linux/kexec.h>
+ #include <linux/slab.h>
+ #include <linux/panic_notifier.h>
+@@ -27,6 +28,9 @@
+ EXPORT_SYMBOL_GPL(hypercall_page);
++DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm);
++EXPORT_STATIC_CALL_TRAMP(xen_hypercall);
++
+ /*
+  * Pointer to the xen_vcpu_info structure or
+  * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
+@@ -72,6 +76,67 @@ EXPORT_SYMBOL(xen_start_flags);
+  */
+ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
++static __ref void xen_get_vendor(void)
++{
++      init_cpu_devs();
++      cpu_detect(&boot_cpu_data);
++      get_cpu_vendor(&boot_cpu_data);
++}
++
++void xen_hypercall_setfunc(void)
++{
++      if (static_call_query(xen_hypercall) != xen_hypercall_hvm)
++              return;
++
++      if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
++              static_call_update(xen_hypercall, xen_hypercall_amd);
++      else
++              static_call_update(xen_hypercall, xen_hypercall_intel);
++}
++
++/*
++ * Evaluate processor vendor in order to select the correct hypercall
++ * function for HVM/PVH guests.
++ * Might be called very early in boot before vendor has been set by
++ * early_cpu_init().
++ */
++noinstr void *__xen_hypercall_setfunc(void)
++{
++      void (*func)(void);
++
++      /*
++       * Xen is supported only on CPUs with CPUID, so testing for
++       * X86_FEATURE_CPUID is a test for early_cpu_init() having been
++       * run.
++       *
++       * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
++       * dependency chain: it is being called via the xen_hypercall static
++       * call when running as a PVH or HVM guest. Hypercalls need to be
++       * noinstr due to PV guests using hypercalls in noinstr code. So we
++       * can safely tag the function body as "instrumentation ok", since
++       * the PV guest requirement is not of interest here (xen_get_vendor()
++       * calls noinstr functions, and static_call_update_early() might do
++       * so, too).
++       */
++      instrumentation_begin();
++
++      if (!boot_cpu_has(X86_FEATURE_CPUID))
++              xen_get_vendor();
++
++      if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
++              func = xen_hypercall_amd;
++      else
++              func = xen_hypercall_intel;
++
++      static_call_update_early(xen_hypercall, func);
++
++      instrumentation_end();
++
++      return func;
++}
++
+ static int xen_cpu_up_online(unsigned int cpu)
+ {
+       xen_init_lock_cpu(cpu);
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -299,6 +299,10 @@ static uint32_t __init xen_platform_hvm(
+       if (xen_pv_domain())
+               return 0;
++      /* Set correct hypercall function. */
++      if (xen_domain)
++              xen_hypercall_setfunc();
++
+       if (xen_pvh_domain() && nopv) {
+               /* Guest booting via the Xen-PVH boot entry goes here */
+               pr_info("\"nopv\" parameter is ignored in PVH guest\n");
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1248,6 +1248,9 @@ asmlinkage __visible void __init xen_sta
+       xen_domain_type = XEN_PV_DOMAIN;
+       xen_start_flags = xen_start_info->flags;
++      /* Interrupts are guaranteed to be off initially. */
++      early_boot_irqs_disabled = true;
++      static_call_update_early(xen_hypercall, xen_hypercall_pv);
+       xen_setup_features();
+@@ -1340,7 +1343,6 @@ asmlinkage __visible void __init xen_sta
+       WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
+       local_irq_disable();
+-      early_boot_irqs_disabled = true;
+       xen_raw_console_write("mapping kernel into physical memory\n");
+       xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -20,10 +20,33 @@
+ #include <linux/init.h>
+ #include <linux/linkage.h>
++#include <linux/objtool.h>
+ #include <../entry/calling.h>
+ .pushsection .noinstr.text, "ax"
+ /*
++ * PV hypercall interface to the hypervisor.
++ *
++ * Called via inline asm(), so better preserve %rcx and %r11.
++ *
++ * Input:
++ *    %eax: hypercall number
++ *    %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
++ * Output: %rax
++ */
++SYM_FUNC_START(xen_hypercall_pv)
++      ANNOTATE_NOENDBR
++      push %rcx
++      push %r11
++      UNWIND_HINT_SAVE
++      syscall
++      UNWIND_HINT_RESTORE
++      pop %r11
++      pop %rcx
++      RET
++SYM_FUNC_END(xen_hypercall_pv)
++
++/*
+  * Disabling events is simply a matter of making the event mask
+  * non-zero.
+  */
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -6,9 +6,11 @@
+ #include <linux/elfnote.h>
+ #include <linux/init.h>
++#include <linux/instrumentation.h>
+ #include <asm/boot.h>
+ #include <asm/asm.h>
++#include <asm/frame.h>
+ #include <asm/msr.h>
+ #include <asm/page_types.h>
+ #include <asm/percpu.h>
+@@ -80,6 +82,87 @@ SYM_CODE_END(asm_cpu_bringup_and_idle)
+ #endif
+ #endif
++      .pushsection .noinstr.text, "ax"
++/*
++ * Xen hypercall interface to the hypervisor.
++ *
++ * Input:
++ *     %eax: hypercall number
++ *   32-bit:
++ *     %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall
++ *   64-bit:
++ *     %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
++ * Output: %[er]ax
++ */
++SYM_FUNC_START(xen_hypercall_hvm)
++      ENDBR
++      FRAME_BEGIN
++      /* Save all relevant registers (caller save and arguments). */
++#ifdef CONFIG_X86_32
++      push %eax
++      push %ebx
++      push %ecx
++      push %edx
++      push %esi
++      push %edi
++#else
++      push %rax
++      push %rcx
++      push %rdx
++      push %rdi
++      push %rsi
++      push %r11
++      push %r10
++      push %r9
++      push %r8
++#ifdef CONFIG_FRAME_POINTER
++      pushq $0        /* Dummy push for stack alignment. */
++#endif
++#endif
++      /* Set the vendor specific function. */
++      call __xen_hypercall_setfunc
++      /* Set ZF = 1 if AMD, Restore saved registers. */
++#ifdef CONFIG_X86_32
++      lea xen_hypercall_amd, %ebx
++      cmp %eax, %ebx
++      pop %edi
++      pop %esi
++      pop %edx
++      pop %ecx
++      pop %ebx
++      pop %eax
++#else
++      lea xen_hypercall_amd(%rip), %rbx
++      cmp %rax, %rbx
++#ifdef CONFIG_FRAME_POINTER
++      pop %rax        /* Dummy pop. */
++#endif
++      pop %r8
++      pop %r9
++      pop %r10
++      pop %r11
++      pop %rsi
++      pop %rdi
++      pop %rdx
++      pop %rcx
++      pop %rax
++#endif
++      /* Use correct hypercall function. */
++      jz xen_hypercall_amd
++      jmp xen_hypercall_intel
++SYM_FUNC_END(xen_hypercall_hvm)
++
++SYM_FUNC_START(xen_hypercall_amd)
++      vmmcall
++      RET
++SYM_FUNC_END(xen_hypercall_amd)
++
++SYM_FUNC_START(xen_hypercall_intel)
++      vmcall
++      RET
++SYM_FUNC_END(xen_hypercall_intel)
++      .popsection
++
+       ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
+       ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz "2.6")
+       ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz "xen-3.0")
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -162,4 +162,13 @@ void xen_hvm_post_suspend(int suspend_ca
+ static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
+ #endif
++#ifdef CONFIG_XEN_PV
++void xen_hypercall_pv(void);
++#endif
++void xen_hypercall_hvm(void);
++void xen_hypercall_amd(void);
++void xen_hypercall_intel(void);
++void xen_hypercall_setfunc(void);
++void *__xen_hypercall_setfunc(void);
++
+ #endif /* XEN_OPS_H */
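
To summarize how the pieces above fit together: which low-level routine ends up behind
the xen_hypercall static call depends on guest type and CPU vendor. An illustrative
sketch using the function names defined in the hunks above (not additional kernel code):

    #include <linux/static_call.h>
    #include <asm/processor.h>
    #include <xen/xen.h>
    #include "xen-ops.h"

    /* Illustrative only: the selection logic spread over the hunks above. */
    static void example_select_hypercall_func(void)
    {
            if (xen_pv_domain()) {
                    /* PV guests enter the hypervisor via syscall. */
                    static_call_update_early(xen_hypercall, xen_hypercall_pv);
            } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
                       boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
                    /* HVM/PVH guests on AMD/Hygon use vmmcall. */
                    static_call_update(xen_hypercall, xen_hypercall_amd);
            } else {
                    /* HVM/PVH guests on Intel use vmcall. */
                    static_call_update(xen_hypercall, xen_hypercall_intel);
            }
    }
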
diff --git a/queue-6.1/x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch b/queue-6.1/x86-xen-don-t-do-pv-iret-hypercall-through-hypercall-page.patch
new file mode 100644 (file)
index 0000000..83a122b
--- /dev/null
@@ -0,0 +1,80 @@
+From 029d22549457568d54121b9c2020d0969ae29a7a Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 16 Oct 2024 10:40:26 +0200
+Subject: x86/xen: don't do PV iret hypercall through hypercall page
+
+From: Juergen Gross <jgross@suse.com>
+
+commit a2796dff62d6c6bfc5fbebdf2bee0d5ac0438906 upstream.
+
+Instead of jumping to the Xen hypercall page for doing the iret
+hypercall, directly code the required sequence in xen-asm.S.
+
+This is done in preparation for no longer using the hypercall page at
+all, as it has been shown to cause problems with speculation mitigations.
+
+This is part of XSA-466 / CVE-2024-53241.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/xen/xen-asm.S |   27 ++++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -176,7 +176,6 @@ SYM_CODE_START(xen_early_idt_handler_arr
+ SYM_CODE_END(xen_early_idt_handler_array)
+       __FINIT
+-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+ /*
+  * Xen64 iret frame:
+  *
+@@ -186,17 +185,28 @@ hypercall_iret = hypercall_page + __HYPE
+  *    cs
+  *    rip             <-- standard iret frame
+  *
+- *    flags
++ *    flags           <-- xen_iret must push from here on
+  *
+- *    rcx             }
+- *    r11             }<-- pushed by hypercall page
+- * rsp->rax           }
++ *    rcx
++ *    r11
++ * rsp->rax
+  */
++.macro xen_hypercall_iret
++      pushq $0        /* Flags */
++      push %rcx
++      push %r11
++      push %rax
++      mov  $__HYPERVISOR_iret, %eax
++      syscall         /* Do the IRET. */
++#ifdef CONFIG_MITIGATION_SLS
++      int3
++#endif
++.endm
++
+ SYM_CODE_START(xen_iret)
+       UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR
+-      pushq $0
+-      jmp hypercall_iret
++      xen_hypercall_iret
+ SYM_CODE_END(xen_iret)
+ /*
+@@ -301,8 +311,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat
+       ENDBR
+       lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
+       mov $-ENOSYS, %rax
+-      pushq $0
+-      jmp hypercall_iret
++      xen_hypercall_iret
+ SYM_CODE_END(xen_entry_SYSENTER_compat)
+ SYM_CODE_END(xen_entry_SYSCALL_compat)
diff --git a/queue-6.1/x86-xen-remove-hypercall-page.patch b/queue-6.1/x86-xen-remove-hypercall-page.patch
new file mode 100644 (file)
index 0000000..a846c52
--- /dev/null
@@ -0,0 +1,131 @@
+From 40ee48433dfdb406c69943871a144f95181ea68c Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 17 Oct 2024 15:27:31 +0200
+Subject: x86/xen: remove hypercall page
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 7fa0da5373685e7ed249af3fa317ab1e1ba8b0a6 upstream.
+
+The hypercall page is no longer needed. It can be removed, as from the
+Xen perspective it is optional.
+
+But, from Linux's perspective, removing it eliminates naked RET
+instructions that escape the speculative protections that Call Depth
+Tracking and/or Untrain Ret are trying to achieve.
+
+This is part of XSA-466 / CVE-2024-53241.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/xen/hypercall.h |    2 --
+ arch/x86/xen/enlighten.c             |    2 --
+ arch/x86/xen/enlighten_hvm.c         |    9 +--------
+ arch/x86/xen/enlighten_pvh.c         |    7 -------
+ arch/x86/xen/xen-head.S              |   23 -----------------------
+ 5 files changed, 1 insertion(+), 42 deletions(-)
+
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -88,8 +88,6 @@ struct xen_dm_op_buf;
+  * there aren't more than 5 arguments...)
+  */
+-extern struct { char _entry[32]; } hypercall_page[];
+-
+ void xen_hypercall_func(void);
+ DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -26,8 +26,6 @@
+ #include "smp.h"
+ #include "pmu.h"
+-EXPORT_SYMBOL_GPL(hypercall_page);
+-
+ DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm);
+ EXPORT_STATIC_CALL_TRAMP(xen_hypercall);
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -108,15 +108,8 @@ static void __init init_hvm_pv_info(void
+       /* PVH set up hypercall page in xen_prepare_pvh(). */
+       if (xen_pvh_domain())
+               pv_info.name = "Xen PVH";
+-      else {
+-              u64 pfn;
+-              uint32_t msr;
+-
++      else
+               pv_info.name = "Xen HVM";
+-              msr = cpuid_ebx(base + 2);
+-              pfn = __pa(hypercall_page);
+-              wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+-      }
+       xen_setup_features();
+--- a/arch/x86/xen/enlighten_pvh.c
++++ b/arch/x86/xen/enlighten_pvh.c
+@@ -27,17 +27,10 @@ EXPORT_SYMBOL_GPL(xen_pvh);
+ void __init xen_pvh_init(struct boot_params *boot_params)
+ {
+-      u32 msr;
+-      u64 pfn;
+-
+       xen_pvh = 1;
+       xen_domain_type = XEN_HVM_DOMAIN;
+       xen_start_flags = pvh_start_info.flags;
+-      msr = cpuid_ebx(xen_cpuid_base() + 2);
+-      pfn = __pa(hypercall_page);
+-      wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+-
+       if (xen_initial_domain())
+               x86_init.oem.arch_setup = xen_add_preferred_consoles;
+       x86_init.oem.banner = xen_banner;
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -22,28 +22,6 @@
+ #include <xen/interface/xen-mca.h>
+ #include <asm/xen/interface.h>
+-.pushsection .noinstr.text, "ax"
+-      .balign PAGE_SIZE
+-SYM_CODE_START(hypercall_page)
+-      .rept (PAGE_SIZE / 32)
+-              UNWIND_HINT_FUNC
+-              ANNOTATE_NOENDBR
+-              ANNOTATE_UNRET_SAFE
+-              ret
+-              /*
+-               * Xen will write the hypercall page, and sort out ENDBR.
+-               */
+-              .skip 31, 0xcc
+-      .endr
+-
+-#define HYPERCALL(n) \
+-      .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
+-      .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
+-#include <asm/xen-hypercalls.h>
+-#undef HYPERCALL
+-SYM_CODE_END(hypercall_page)
+-.popsection
+-
+ #ifdef CONFIG_XEN_PV
+       __INIT
+ SYM_CODE_START(startup_xen)
+@@ -176,7 +154,6 @@ SYM_FUNC_END(xen_hypercall_intel)
+ #ifdef CONFIG_XEN_PV
+       ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          _ASM_PTR startup_xen)
+ #endif
+-      ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
+       ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
+               .ascii "!writable_page_tables|pae_pgdir_above_4gb")
+       ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
diff --git a/queue-6.1/x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch b/queue-6.1/x86-xen-use-new-hypercall-functions-instead-of-hypercall-page.patch
new file mode 100644 (file)
index 0000000..3068f9d
--- /dev/null
@@ -0,0 +1,118 @@
+From f0305bb3e3abb3c66d3e895dc279caf086eacdaa Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 17 Oct 2024 14:47:13 +0200
+Subject: x86/xen: use new hypercall functions instead of hypercall page
+
+From: Juergen Gross <jgross@suse.com>
+
+commit b1c2cb86f4a7861480ad54bb9a58df3cbebf8e92 upstream.
+
+Call the Xen hypervisor via the new xen_hypercall_func static-call
+instead of the hypercall page.
+
+This is part of XSA-466 / CVE-2024-53241.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Co-developed-by: Peter Zijlstra <peterz@infradead.org>
+Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/xen/hypercall.h |   33 ++++++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -39,9 +39,11 @@
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/pgtable.h>
++#include <linux/instrumentation.h>
+ #include <trace/events/xen.h>
++#include <asm/alternative.h>
+ #include <asm/page.h>
+ #include <asm/smap.h>
+ #include <asm/nospec-branch.h>
+@@ -91,9 +93,17 @@ extern struct { char _entry[32]; } hyper
+ void xen_hypercall_func(void);
+ DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
+-#define __HYPERCALL           "call hypercall_page+%c[offset]"
+-#define __HYPERCALL_ENTRY(x)                                          \
+-      [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
++#ifdef MODULE
++#define __ADDRESSABLE_xen_hypercall
++#else
++#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
++#endif
++
++#define __HYPERCALL                                   \
++      __ADDRESSABLE_xen_hypercall                     \
++      "call __SCT__xen_hypercall"
++
++#define __HYPERCALL_ENTRY(x)  "a" (x)
+ #ifdef CONFIG_X86_32
+ #define __HYPERCALL_RETREG    "eax"
+@@ -151,7 +161,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h
+       __HYPERCALL_0ARG();                                             \
+       asm volatile (__HYPERCALL                                       \
+                     : __HYPERCALL_0PARAM                              \
+-                    : __HYPERCALL_ENTRY(name)                         \
++                    : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
+                     : __HYPERCALL_CLOBBER0);                          \
+       (type)__res;                                                    \
+ })
+@@ -162,7 +172,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h
+       __HYPERCALL_1ARG(a1);                                           \
+       asm volatile (__HYPERCALL                                       \
+                     : __HYPERCALL_1PARAM                              \
+-                    : __HYPERCALL_ENTRY(name)                         \
++                    : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
+                     : __HYPERCALL_CLOBBER1);                          \
+       (type)__res;                                                    \
+ })
+@@ -173,7 +183,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h
+       __HYPERCALL_2ARG(a1, a2);                                       \
+       asm volatile (__HYPERCALL                                       \
+                     : __HYPERCALL_2PARAM                              \
+-                    : __HYPERCALL_ENTRY(name)                         \
++                    : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
+                     : __HYPERCALL_CLOBBER2);                          \
+       (type)__res;                                                    \
+ })
+@@ -184,7 +194,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h
+       __HYPERCALL_3ARG(a1, a2, a3);                                   \
+       asm volatile (__HYPERCALL                                       \
+                     : __HYPERCALL_3PARAM                              \
+-                    : __HYPERCALL_ENTRY(name)                         \
++                    : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
+                     : __HYPERCALL_CLOBBER3);                          \
+       (type)__res;                                                    \
+ })
+@@ -195,7 +205,7 @@ DECLARE_STATIC_CALL(xen_hypercall, xen_h
+       __HYPERCALL_4ARG(a1, a2, a3, a4);                               \
+       asm volatile (__HYPERCALL                                       \
+                     : __HYPERCALL_4PARAM                              \
+-                    : __HYPERCALL_ENTRY(name)                         \
++                    : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
+                     : __HYPERCALL_CLOBBER4);                          \
+       (type)__res;                                                    \
+ })
+@@ -209,12 +219,9 @@ xen_single_call(unsigned int call,
+       __HYPERCALL_DECLS;
+       __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+-      if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+-              return -EINVAL;
+-
+-      asm volatile(CALL_NOSPEC
++      asm volatile(__HYPERCALL
+                    : __HYPERCALL_5PARAM
+-                   : [thunk_target] "a" (&hypercall_page[call])
++                   : __HYPERCALL_ENTRY(call)
+                    : __HYPERCALL_CLOBBER5);
+       return (long)__res;
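
With __HYPERCALL now pointing at the static-call trampoline, hypercall wrappers no
longer reference hypercall_page. A simplified, 64-bit-only sketch of roughly what a
one-argument hypercall boils down to after this patch (the real macros additionally
emit __ADDRESSABLE_xen_hypercall and use the exact per-argument clobber lists):

    #include <xen/interface/xen.h>

    /*
     * Sketch: hypercall number in %eax, arguments in the usual registers,
     * call routed through the static-call trampoline instead of the page.
     */
    static inline long example_sched_op(int cmd, void *arg)
    {
            register unsigned long _a1 asm("rdi") = (unsigned long)cmd;
            register unsigned long _a2 asm("rsi") = (unsigned long)arg;
            long res;

            asm volatile("call __SCT__xen_hypercall"
                         : "=a" (res), "+r" (_a1), "+r" (_a2)
                         : "a" (__HYPERVISOR_sched_op)
                         : "memory");
            return res;
    }
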
diff --git a/queue-6.1/xen-netfront-fix-crash-when-removing-device.patch b/queue-6.1/xen-netfront-fix-crash-when-removing-device.patch
new file mode 100644 (file)
index 0000000..0777a7e
--- /dev/null
@@ -0,0 +1,47 @@
+From 7c4d72762f2abd6dac3ccf2d9d0e4c20f3b9540b Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 7 Nov 2024 16:17:00 +0100
+Subject: xen/netfront: fix crash when removing device
+
+From: Juergen Gross <jgross@suse.com>
+
+commit f9244fb55f37356f75c739c57323d9422d7aa0f8 upstream.
+
+When removing a netfront device directly after a suspend/resume cycle,
+it might happen that the queues have not been set up again, causing a
+crash during the attempt to stop the queues another time.
+
+Fix that by checking that the queues exist before trying to stop
+them.
+
+This is XSA-465 / CVE-2024-53240.
+
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Fixes: d50b7914fae0 ("xen-netfront: Fix NULL sring after live migration")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -867,7 +867,7 @@ static netdev_tx_t xennet_start_xmit(str
+ static int xennet_close(struct net_device *dev)
+ {
+       struct netfront_info *np = netdev_priv(dev);
+-      unsigned int num_queues = dev->real_num_tx_queues;
++      unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
+       unsigned int i;
+       struct netfront_queue *queue;
+       netif_tx_stop_all_queues(np->netdev);
+@@ -882,6 +882,9 @@ static void xennet_destroy_queues(struct
+ {
+       unsigned int i;
++      if (!info->queues)
++              return;
++
+       for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];