6.1-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 6 Nov 2024 06:08:54 +0000 (07:08 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 6 Nov 2024 06:08:54 +0000 (07:08 +0100)
added patches:
io_uring-always-lock-__io_cqring_overflow_flush.patch
x86-bugs-use-code-segment-selector-for-verw-operand.patch

queue-6.1/io_uring-always-lock-__io_cqring_overflow_flush.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/x86-bugs-use-code-segment-selector-for-verw-operand.patch [new file with mode: 0644]

diff --git a/queue-6.1/io_uring-always-lock-__io_cqring_overflow_flush.patch b/queue-6.1/io_uring-always-lock-__io_cqring_overflow_flush.patch
new file mode 100644 (file)
index 0000000..40dbc40
--- /dev/null
@@ -0,0 +1,57 @@
+From 3f1c33f03386c481caf2044a836f3ca611094098 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 10 Apr 2024 02:26:54 +0100
+Subject: io_uring: always lock __io_cqring_overflow_flush
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+Commit 8d09a88ef9d3cb7d21d45c39b7b7c31298d23998 upstream.
+
+Conditional locking is never great; in the case of
+__io_cqring_overflow_flush(), which is a slow path, it's not justified.
+Don't handle IOPOLL separately; always grab uring_lock for overflow
+flushing.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/162947df299aa12693ac4b305dacedab32ec7976.1712708261.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -593,6 +593,8 @@ static bool __io_cqring_overflow_flush(s
+       bool all_flushed;
+       size_t cqe_size = sizeof(struct io_uring_cqe);
+
++      lockdep_assert_held(&ctx->uring_lock);
++
+       if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
+               return false;
+@@ -647,12 +649,9 @@ static bool io_cqring_overflow_flush(str
+       bool ret = true;
+
+       if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
+-              /* iopoll syncs against uring_lock, not completion_lock */
+-              if (ctx->flags & IORING_SETUP_IOPOLL)
+-                      mutex_lock(&ctx->uring_lock);
++              mutex_lock(&ctx->uring_lock);
+               ret = __io_cqring_overflow_flush(ctx, false);
+-              if (ctx->flags & IORING_SETUP_IOPOLL)
+-                      mutex_unlock(&ctx->uring_lock);
++              mutex_unlock(&ctx->uring_lock);
+       }
+
+       return ret;
+@@ -1405,6 +1404,8 @@ static int io_iopoll_check(struct io_rin
+       int ret = 0;
+       unsigned long check_cq;
+
++      lockdep_assert_held(&ctx->uring_lock);
++
+       if (!io_allowed_run_tw(ctx))
+               return -EEXIST;
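
For readers who want the locking pattern outside of diff context, here is a minimal userspace sketch of the shape the patch moves to: the caller takes the lock unconditionally, and the slow-path helper merely asserts that the lock is held instead of locking conditionally. The names (struct ctx, flush_overflow, maybe_flush_overflow) and the pthread mutex plus a bool standing in for lockdep_assert_held() are illustrative assumptions, not io_uring code.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;     /* stands in for ctx->uring_lock */
	bool lock_held;           /* poor man's lockdep_assert_held() */
	int overflowed;           /* pretend overflow backlog */
};

/* Slow path: every caller must already hold ctx->lock. */
static bool flush_overflow(struct ctx *ctx)
{
	assert(ctx->lock_held);   /* cf. lockdep_assert_held() in the patch */
	bool had_backlog = ctx->overflowed > 0;
	ctx->overflowed = 0;
	return had_backlog;
}

/* Wrapper: lock unconditionally, with no special IOPOLL case. */
static bool maybe_flush_overflow(struct ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->lock_held = true;
	bool ret = flush_overflow(ctx);
	ctx->lock_held = false;
	pthread_mutex_unlock(&ctx->lock);
	return ret;
}

int main(void)
{
	struct ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER, .overflowed = 3 };
	printf("flushed backlog: %s\n", maybe_flush_overflow(&ctx) ? "yes" : "no");
	return 0;
}

The point mirrors the changelog: taking uring_lock unconditionally on a slow path is cheaper to reason about than locking only when IORING_SETUP_IOPOLL is set.
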
diff --git a/queue-6.1/series b/queue-6.1/series
index a9b3fb705b12ba5311dae2b10523ac81168fc809..f9f2770fb28c45e27d96e8b73aa47649e84981ad 100644 (file)
@@ -111,3 +111,5 @@ migrate_pages-separate-hugetlb-folios-migration.patch
 migrate_pages-restrict-number-of-pages-to-migrate-in.patch
 migrate_pages-split-unmap_and_move-to-_unmap-and-_mo.patch
 vmscan-migrate-fix-page-count-imbalance-on-node-stat.patch
+io_uring-always-lock-__io_cqring_overflow_flush.patch
+x86-bugs-use-code-segment-selector-for-verw-operand.patch
diff --git a/queue-6.1/x86-bugs-use-code-segment-selector-for-verw-operand.patch b/queue-6.1/x86-bugs-use-code-segment-selector-for-verw-operand.patch
new file mode 100644 (file)
index 0000000..1893f98
--- /dev/null
@@ -0,0 +1,81 @@
+From e4d2102018542e3ae5e297bc6e229303abff8a0f Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 26 Sep 2024 09:10:31 -0700
+Subject: x86/bugs: Use code segment selector for VERW operand
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit e4d2102018542e3ae5e297bc6e229303abff8a0f upstream.
+
+Robert Gill reported the below #GP in 32-bit mode when the dosemu software
+was executing the vm86() system call:
+
+  general protection fault: 0000 [#1] PREEMPT SMP
+  CPU: 4 PID: 4610 Comm: dosemu.bin Not tainted 6.6.21-gentoo-x86 #1
+  Hardware name: Dell Inc. PowerEdge 1950/0H723K, BIOS 2.7.0 10/30/2010
+  EIP: restore_all_switch_stack+0xbe/0xcf
+  EAX: 00000000 EBX: 00000000 ECX: 00000000 EDX: 00000000
+  ESI: 00000000 EDI: 00000000 EBP: 00000000 ESP: ff8affdc
+  DS: 0000 ES: 0000 FS: 0000 GS: 0033 SS: 0068 EFLAGS: 00010046
+  CR0: 80050033 CR2: 00c2101c CR3: 04b6d000 CR4: 000406d0
+  Call Trace:
+   show_regs+0x70/0x78
+   die_addr+0x29/0x70
+   exc_general_protection+0x13c/0x348
+   exc_bounds+0x98/0x98
+   handle_exception+0x14d/0x14d
+   exc_bounds+0x98/0x98
+   restore_all_switch_stack+0xbe/0xcf
+   exc_bounds+0x98/0x98
+   restore_all_switch_stack+0xbe/0xcf
+
+This only happens in 32-bit mode when VERW-based mitigations like MDS/RFDS
+are enabled. This is because segment registers with an arbitrary user value
+can result in #GP when executing VERW. The Intel SDM vol. 2C documents the
+following behavior for the VERW instruction:
+
+  #GP(0) - If a memory operand effective address is outside the CS, DS, ES,
+          FS, or GS segment limit.
+
+The CLEAR_CPU_BUFFERS macro executes the VERW instruction before returning
+to user space. Use the %cs selector to reference the VERW operand. This
+ensures VERW will not #GP for an arbitrary user %ds.
+
+[ mingo: Fixed the SOB chain. ]
+
+Fixes: a0e2dab44d22 ("x86/entry_32: Add VERW just before userspace transition")
+Reported-by: Robert Gill <rtgill82@gmail.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: stable@vger.kernel.org # 5.10+
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218707
+Closes: https://lore.kernel.org/all/8c77ccfd-d561-45a1-8ed5-6b75212c7a58@leemhuis.info/
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Suggested-by: Brian Gerst <brgerst@gmail.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -211,7 +211,16 @@
+  */
+ .macro CLEAR_CPU_BUFFERS
+       ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
+-      verw _ASM_RIP(mds_verw_sel)
++#ifdef CONFIG_X86_64
++      verw mds_verw_sel(%rip)
++#else
++      /*
++       * In 32bit mode, the memory operand must be a %cs reference. The data
++       * segments may not be usable (vm86 mode), and the stack segment may not
++       * be flat (ESPFIX32).
++       */
++      verw %cs:mds_verw_sel
++#endif
+ .Lskip_verw_\@:
+ .endm
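
As a rough illustration of what the #ifdef above changes, the sketch below uses plain userspace GCC inline assembly; the names verw_arg, clear_cpu_buffers_default and clear_cpu_buffers_cs are made up for the example and are not the kernel macro. It shows the two operand forms: the default form lets the CPU resolve the VERW memory operand through %ds, while the %cs: override forces resolution through the code segment, which is the one segment guaranteed to be valid on the 32-bit return-to-user path (vm86 can leave %ds holding an arbitrary value, and ESPFIX32 means %ss may not be flat).

#include <stdio.h>

/* A word of memory holding a segment selector; stands in for mds_verw_sel. */
static unsigned short verw_arg;

/* Default addressing: the operand is implicitly %ds-relative, like the old
 * _ASM_RIP(mds_verw_sel) form. On the kernel's 32-bit exit path this is the
 * form that could #GP when user %ds holds an arbitrary (vm86) value. */
static void clear_cpu_buffers_default(void)
{
	asm volatile("verw %0" : : "m"(verw_arg) : "cc");
}

/* Explicit %cs: override, mirroring the 32-bit branch added by the patch.
 * %cs is always a valid, flat kernel selector at that point, so the
 * effective-address check cannot fault on a bad data segment. */
static void clear_cpu_buffers_cs(void)
{
	asm volatile("verw %%cs:%0" : : "m"(verw_arg) : "cc");
}

int main(void)
{
	/* Both calls simply execute VERW on a null selector here; the segment
	 * override only matters for the fault semantics described above. */
	clear_cpu_buffers_default();
	clear_cpu_buffers_cs();
	puts("executed VERW with and without a %cs segment override");
	return 0;
}

On 64-bit kernels the data-segment overrides are ignored by the CPU and the vm86/ESPFIX problem does not arise, which is why the patch keeps the simpler %rip-relative form under CONFIG_X86_64.
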