git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 22 Apr 2025 08:24:51 +0000 (10:24 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 22 Apr 2025 08:24:51 +0000 (10:24 +0200)
added patches:
powerpc-rtas-prevent-spectre-v1-gadget-construction-in-sys_rtas.patch
x86-pvh-call-c-code-via-the-kernel-virtual-mapping.patch
x86-split_lock-fix-the-delayed-detection-logic.patch

queue-6.1/powerpc-rtas-prevent-spectre-v1-gadget-construction-in-sys_rtas.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/x86-pvh-call-c-code-via-the-kernel-virtual-mapping.patch [new file with mode: 0644]
queue-6.1/x86-split_lock-fix-the-delayed-detection-logic.patch [new file with mode: 0644]

diff --git a/queue-6.1/powerpc-rtas-prevent-spectre-v1-gadget-construction-in-sys_rtas.patch b/queue-6.1/powerpc-rtas-prevent-spectre-v1-gadget-construction-in-sys_rtas.patch
new file mode 100644 (file)
index 0000000..71e64d8
--- /dev/null
@@ -0,0 +1,54 @@
+From 0974d03eb479384466d828d65637814bee6b26d7 Mon Sep 17 00:00:00 2001
+From: Nathan Lynch <nathanl@linux.ibm.com>
+Date: Thu, 30 May 2024 19:44:12 -0500
+Subject: powerpc/rtas: Prevent Spectre v1 gadget construction in sys_rtas()
+
+From: Nathan Lynch <nathanl@linux.ibm.com>
+
+commit 0974d03eb479384466d828d65637814bee6b26d7 upstream.
+
+Smatch warns:
+
+  arch/powerpc/kernel/rtas.c:1932 __do_sys_rtas() warn: potential
+  spectre issue 'args.args' [r] (local cap)
+
+The 'nargs' and 'nret' locals come directly from a user-supplied
+buffer and are used as indexes into a small stack-based array and as
+inputs to copy_to_user() after they are subject to bounds checks.
+
+Use array_index_nospec() after the bounds checks to clamp these values
+for speculative execution.
+
+Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
+Reported-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Breno Leitao <leitao@debian.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240530-sys_rtas-nargs-nret-v1-1-129acddd4d89@linux.ibm.com
+[Minor context change fixed]
+Signed-off-by: Cliff Liu <donghua.liu@windriver.com>
+Signed-off-by: He Zhe <Zhe.He@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/rtas.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -25,6 +25,7 @@
+ #include <linux/reboot.h>
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
++#include <linux/nospec.h>
+ #include <linux/of.h>
+ #include <linux/of_fdt.h>
+@@ -1178,6 +1179,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args _
+           || nargs + nret > ARRAY_SIZE(args.args))
+               return -EINVAL;
++      nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
++      nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);
++
+       /* Copy in args. */
+       if (copy_from_user(args.args, uargs->args,
+                          nargs * sizeof(rtas_arg_t)) != 0)
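For reference, the kernel's array_index_nospec() clamps an index using only
data dependencies, so even a speculatively out-of-bounds index is forced back
into range. Below is a stand-alone C sketch of the idea; nospec_clamp() is a
simplified, hypothetical stand-in modeled on the kernel's generic mask (the
real macro also hides the value from the optimizer), and NR_ARGS stands in
for ARRAY_SIZE(args.args):

#include <stdio.h>

#define NR_ARGS 16  /* stand-in for ARRAY_SIZE(args.args) */

/* Branchless clamp: returns idx when idx < size, 0 otherwise. */
static unsigned long nospec_clamp(unsigned long idx, unsigned long size)
{
    /* Mask is ~0UL when idx < size, 0UL when it is not. */
    unsigned long mask =
        (unsigned long)(~(long)(idx | (size - 1UL - idx)) >>
                        (8 * sizeof(long) - 1));
    return idx & mask;
}

int main(void)
{
    unsigned long args[NR_ARGS] = { 0 };
    unsigned long nargs = 5;  /* pretend this was copied from user space */

    /* Architectural bounds check, as sys_rtas() already performs ... */
    if (nargs >= NR_ARGS)
        return 1;

    /*
     * ... but the CPU may speculate past the branch with an
     * out-of-range value. The clamp keeps even the speculated index
     * inside the array, which is what the patch adds for nargs/nret.
     */
    nargs = nospec_clamp(nargs, NR_ARGS);
    printf("index after clamp: %lu (value %lu)\n", nargs, args[nargs]);
    return 0;
}

Because the mask is computed arithmetically rather than via a branch, there
is nothing for the branch predictor to mispredict, so the clamp holds even
under speculative execution.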
diff --git a/queue-6.1/series b/queue-6.1/series
index de36758a86f9f9f81a37d013eef487e5f5669d4f..b0cc8f77538db98d9871cdfe3365a6f65b291ced 100644 (file)
@@ -264,3 +264,6 @@ misc-pci_endpoint_test-avoid-issue-of-interrupts-remaining-after-request_irq-err
 misc-pci_endpoint_test-fix-displaying-irq_type-after-request_irq-error.patch
 misc-pci_endpoint_test-fix-irq_type-to-convey-the-correct-type.patch
 mm-fix-is_zero_page-usage-in-try_grab_page.patch
+x86-split_lock-fix-the-delayed-detection-logic.patch
+x86-pvh-call-c-code-via-the-kernel-virtual-mapping.patch
+powerpc-rtas-prevent-spectre-v1-gadget-construction-in-sys_rtas.patch
diff --git a/queue-6.1/x86-pvh-call-c-code-via-the-kernel-virtual-mapping.patch b/queue-6.1/x86-pvh-call-c-code-via-the-kernel-virtual-mapping.patch
new file mode 100644 (file)
index 0000000..3cb11a9
--- /dev/null
@@ -0,0 +1,49 @@
+From e8fbc0d9cab6c1ee6403f42c0991b0c1d5dbc092 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Wed, 9 Oct 2024 18:04:40 +0200
+Subject: x86/pvh: Call C code via the kernel virtual mapping
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit e8fbc0d9cab6c1ee6403f42c0991b0c1d5dbc092 upstream.
+
+Calling C code via a different mapping than it was linked at is
+problematic, because the compiler assumes that RIP-relative and absolute
+symbol references are interchangeable. GCC in particular may use
+RIP-relative per-CPU variable references even when not using -fpic.
+
+So call xen_prepare_pvh() via its kernel virtual mapping on x86_64, so
+that those RIP-relative references produce the correct values. This
+matches the pre-existing behavior for i386, which also invokes
+xen_prepare_pvh() via the kernel virtual mapping before invoking
+startup_32 with paging disabled again.
+
+Fixes: 7243b93345f7 ("xen/pvh: Bootstrap PVH guest")
+Tested-by: Jason Andryuk <jason.andryuk@amd.com>
+Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20241009160438.3884381-8-ardb+git@google.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+[ Stable context update ]
+Signed-off-by: Jason Andryuk <jason.andryuk@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/platform/pvh/head.S |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/platform/pvh/head.S
++++ b/arch/x86/platform/pvh/head.S
+@@ -100,7 +100,12 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
+       xor %edx, %edx
+       wrmsr
+-      call xen_prepare_pvh
++      /* Call xen_prepare_pvh() via the kernel virtual mapping */
++      leaq xen_prepare_pvh(%rip), %rax
++      subq phys_base(%rip), %rax
++      addq $__START_KERNEL_map, %rax
++      ANNOTATE_RETPOLINE_SAFE
++      call *%rax
+       /* startup_64 expects boot_params in %rsi. */
+       mov $_pa(pvh_bootparams), %rsi
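The three added instructions perform the usual x86_64 translation of a
symbol's run-time (physical) address back to the virtual address it was
linked at, virt = addr - phys_base + __START_KERNEL_map, and then call
through the register (with the indirect call annotated retpoline-safe). A
minimal C sketch of the same arithmetic, using made-up example values for
the symbol address and phys_base:

#include <stdint.h>
#include <stdio.h>

/* Base of the x86_64 kernel text mapping (architectural constant). */
#define START_KERNEL_MAP 0xffffffff80000000ULL

int main(void)
{
    /*
     * Hypothetical values: at PVH entry the code still runs from its
     * physical load address, so a RIP-relative reference to
     * xen_prepare_pvh yields a physical address; phys_base records the
     * offset at which the kernel was actually loaded.
     */
    uint64_t rip_relative = 0x2345678;  /* made-up &xen_prepare_pvh */
    uint64_t phys_base    = 0x1000000;  /* made-up load offset */

    /* Same arithmetic as the patch's leaq/subq/addq sequence. */
    uint64_t virt = rip_relative - phys_base + START_KERNEL_MAP;

    printf("call target: 0x%llx\n", (unsigned long long)virt);
    return 0;
}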
diff --git a/queue-6.1/x86-split_lock-fix-the-delayed-detection-logic.patch b/queue-6.1/x86-split_lock-fix-the-delayed-detection-logic.patch
new file mode 100644 (file)
index 0000000..bb0b207
--- /dev/null
@@ -0,0 +1,156 @@
+From c929d08df8bee855528b9d15b853c892c54e1eee Mon Sep 17 00:00:00 2001
+From: Maksim Davydov <davydov-max@yandex-team.ru>
+Date: Wed, 15 Jan 2025 16:17:04 +0300
+Subject: x86/split_lock: Fix the delayed detection logic
+
+From: Maksim Davydov <davydov-max@yandex-team.ru>
+
+commit c929d08df8bee855528b9d15b853c892c54e1eee upstream.
+
+If the warning mode is used with the mitigation mode disabled, then on
+each CPU where a split lock occurs, detection is disabled in order to
+make progress, and delayed work is scheduled to re-enable detection
+later.
+
+It turns out that all CPUs share one global delayed work structure.
+As a result, if a split lock occurs on several CPUs at the same time
+(within 2 jiffies), only one CPU schedules the delayed work and the
+rest do not.
+
+The return value of schedule_delayed_work_on() would have shown this,
+but it is not checked in the code.
+
+A diagram that can help to understand the bug reproduction:
+
+ - sld_update_msr() enables/disables SLD on both CPUs on the same core
+
+ - schedule_delayed_work_on() internally checks WORK_STRUCT_PENDING_BIT.
+   If a work has the 'pending' status, then schedule_delayed_work_on()
+   will return an error code and, most importantly, the work will not
+   be placed in the workqueue.
+
+Let's say we have a multicore system on which split_lock_mitigate=0 and
+a multithreaded application is running that triggers split locks in
+multiple threads. Because sld_update_msr() affects the entire core
+(both CPUs), we will consider 2 CPUs from different cores. Let the 2
+threads of this application be scheduled to CPU 0 (core 0) and to CPU 2
+(core 1), then:
+
+|                                 ||                                   |
+|             CPU 0 (core 0)      ||          CPU 2 (core 1)           |
+|_________________________________||___________________________________|
+|                                 ||                                   |
+| 1) SPLIT LOCK occurred          ||                                   |
+|                                 ||                                   |
+| 2) split_lock_warn()            ||                                   |
+|                                 ||                                   |
+| 3) sysctl_sld_mitigate == 0     ||                                   |
+|    (work = &sl_reenable)        ||                                   |
+|                                 ||                                   |
+| 4) schedule_delayed_work_on()   ||                                   |
+|    (reenable will be called     ||                                   |
+|     after 2 jiffies on CPU 0)   ||                                   |
+|                                 ||                                   |
+| 5) disable SLD for core 0       ||                                   |
+|                                 ||                                   |
+|    -------------------------    ||                                   |
+|                                 ||                                   |
+|                                 || 6) SPLIT LOCK occurred            |
+|                                 ||                                   |
+|                                 || 7) split_lock_warn()              |
+|                                 ||                                   |
+|                                 || 8) sysctl_sld_mitigate == 0       |
+|                                 ||    (work = &sl_reenable,          |
+|                                 ||     the same address as in 3) )   |
+|                                 ||                                   |
+|            2 jiffies            || 9) schedule_delayed_work_on()     |
+|                                 ||    fails because the work is in   |
+|                                 ||    the pending state since 4).    |
+|                                 ||    The work wasn't placed to the  |
+|                                 ||    workqueue. reenable won't be   |
+|                                 ||    called on CPU 2                |
+|                                 ||                                   |
+|                                 || 10) disable SLD for core 1        |
+|                                 ||                                   |
+|                                 ||     From now on SLD will          |
+|                                 ||     never be reenabled on core 1  |
+|                                 ||                                   |
+|    -------------------------    ||                                   |
+|                                 ||                                   |
+|    11) enable SLD for core 0 by ||                                   |
+|        __split_lock_reenable    ||                                   |
+|                                 ||                                   |
+
+If the application threads can be scheduled on all processor cores,
+then over time only one core will be left on which SLD is enabled and
+split locks can still be detected; on all the other cores SLD will
+remain disabled the whole time.
+
+Most likely, this bug has not been noticed for so long because the
+default value of sysctl_sld_mitigate is 1. In that case a semaphore is
+used that does not allow 2 different cores to have SLD disabled at the
+same time, i.e. strictly one work item at a time is placed in the
+workqueue.
+
+To fix the warning mode when the mitigation mode is disabled, the
+delayed work has to be per-CPU. Implement it.
+
+Fixes: 727209376f49 ("x86/split_lock: Add sysctl to control the misery mode")
+Signed-off-by: Maksim Davydov <davydov-max@yandex-team.ru>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ravi Bangoria <ravi.bangoria@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lore.kernel.org/r/20250115131704.132609-1-davydov-max@yandex-team.ru
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/intel.c |   20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -1204,7 +1204,13 @@ static void __split_lock_reenable(struct
+ {
+       sld_update_msr(true);
+ }
+-static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
++/*
++ * In order for each CPU to schedule its delayed work independently of the
++ * others, delayed work struct must be per-CPU. This is not required when
++ * sysctl_sld_mitigate is enabled because of the semaphore that limits
++ * the number of simultaneously scheduled delayed works to 1.
++ */
++static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
+ /*
+  * If a CPU goes offline with pending delayed work to re-enable split lock
+@@ -1225,7 +1231,7 @@ static int splitlock_cpu_offline(unsigne
+ static void split_lock_warn(unsigned long ip)
+ {
+-      struct delayed_work *work;
++      struct delayed_work *work = NULL;
+       int cpu;
+       if (!current->reported_split_lock)
+@@ -1247,11 +1253,17 @@ static void split_lock_warn(unsigned lon
+               if (down_interruptible(&buslock_sem) == -EINTR)
+                       return;
+               work = &sl_reenable_unlock;
+-      } else {
+-              work = &sl_reenable;
+       }
+       cpu = get_cpu();
++
++      if (!work) {
++              work = this_cpu_ptr(&sl_reenable);
++              /* Deferred initialization of per-CPU struct */
++              if (!work->work.func)
++                      INIT_DELAYED_WORK(work, __split_lock_reenable);
++      }
++
+       schedule_delayed_work_on(cpu, work, 2);
+       /* Disable split lock detection on this CPU to make progress */
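To make the failure mode concrete, the following stand-alone C model (all
names hypothetical; this is not kernel code) imitates the
WORK_STRUCT_PENDING_BIT check described in the commit message: a work item
that is still pending is not queued again, so a single shared structure
silently drops the second CPU's re-enable, while per-CPU structures do not:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct work_model {
    bool pending;
};

/* Refuses an already-pending work, like schedule_delayed_work_on(). */
static bool queue_once(struct work_model *w, int cpu)
{
    if (w->pending)
        return false;  /* the return value the buggy code never checked */
    w->pending = true;
    printf("re-enable queued for CPU %d\n", cpu);
    return true;
}

int main(void)
{
    struct work_model shared = { false };
    struct work_model percpu[NR_CPUS] = { { false } };

    /* Buggy variant: one shared struct, CPU 2's re-enable is dropped. */
    queue_once(&shared, 0);
    if (!queue_once(&shared, 2))
        printf("CPU 2 dropped: SLD would stay disabled on core 1\n");

    /* Fixed variant: per-CPU structs, both re-enables are queued. */
    queue_once(&percpu[0], 0);
    queue_once(&percpu[2], 2);
    return 0;
}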