git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Apr 2024 13:13:01 +0000 (15:13 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Apr 2024 13:13:01 +0000 (15:13 +0200)
added patches:
tls-fix-use-after-free-on-failed-backlog-decryption.patch
x86-cpu-enable-stibp-on-amd-if-automatic-ibrs-is-enabled.patch

queue-6.1/series
queue-6.1/tls-fix-use-after-free-on-failed-backlog-decryption.patch [new file with mode: 0644]
queue-6.1/x86-cpu-enable-stibp-on-amd-if-automatic-ibrs-is-enabled.patch [new file with mode: 0644]

index e4efc391e77ad837e8890964936093a39bf1d155..b35f7fa3d400a1e507e5f7615bb4e6200dd30bb6 100644 (file)
@@ -261,3 +261,5 @@ scsi-qla2xxx-fix-command-flush-on-cable-pull.patch
 scsi-qla2xxx-fix-double-free-of-fcport.patch
 scsi-qla2xxx-change-debug-message-during-driver-unload.patch
 scsi-qla2xxx-delay-i-o-abort-on-pci-error.patch
+x86-cpu-enable-stibp-on-amd-if-automatic-ibrs-is-enabled.patch
+tls-fix-use-after-free-on-failed-backlog-decryption.patch
diff --git a/queue-6.1/tls-fix-use-after-free-on-failed-backlog-decryption.patch b/queue-6.1/tls-fix-use-after-free-on-failed-backlog-decryption.patch
new file mode 100644 (file)
index 0000000..b13b400
--- /dev/null
@@ -0,0 +1,91 @@
+From 13114dc5543069f7b97991e3b79937b6da05f5b0 Mon Sep 17 00:00:00 2001
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Wed, 28 Feb 2024 23:44:00 +0100
+Subject: tls: fix use-after-free on failed backlog decryption
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+commit 13114dc5543069f7b97991e3b79937b6da05f5b0 upstream.
+
+When the decrypt request goes to the backlog and crypto_aead_decrypt
+returns -EBUSY, tls_do_decryption will wait until all async
+decryptions have completed. If one of them fails, tls_do_decryption
+will return -EBADMSG and tls_decrypt_sg jumps to the error path,
+releasing all the pages. But the pages have been passed to the async
+callback, and have already been released by tls_decrypt_done.
+
+The only true async case is when crypto_aead_decrypt returns
+-EINPROGRESS. With -EBUSY, we already waited so we can tell
+tls_sw_recvmsg that the data is available for immediate copy, but we
+need to notify tls_decrypt_sg (via the new ->async_done flag) that the
+memory has already been released.
+
+Fixes: 859054147318 ("net: tls: handle backlogging of crypto requests")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Link: https://lore.kernel.org/r/4755dd8d9bebdefaa19ce1439b833d6199d4364c.1709132643.git.sd@queasysnail.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c |   24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -51,6 +51,7 @@ struct tls_decrypt_arg {
+       struct_group(inargs,
+       bool zc;
+       bool async;
++      bool async_done;
+       u8 tail;
+       );
+@@ -279,18 +280,19 @@ static int tls_do_decryption(struct sock
+       }
+       ret = crypto_aead_decrypt(aead_req);
++      if (ret == -EINPROGRESS)
++              return 0;
++
+       if (ret == -EBUSY) {
+               ret = tls_decrypt_async_wait(ctx);
++              darg->async_done = true;
++              /* all completions have run, we're not doing async anymore */
++              darg->async = false;
++              return ret;
+-              ret = ret ?: -EINPROGRESS;
+       }
+-      if (ret == -EINPROGRESS) {
+-              if (darg->async)
+-                      return 0;
+-              ret = crypto_wait_req(ret, &ctx->async_wait);
+-      } else if (darg->async) {
+-              atomic_dec(&ctx->decrypt_pending);
+-      }
++      atomic_dec(&ctx->decrypt_pending);
+       darg->async = false;
+       return ret;
+@@ -1681,8 +1683,11 @@ static int tls_decrypt_sg(struct sock *s
+       /* Prepare and submit AEAD request */
+       err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
+                               data_len + prot->tail_size, aead_req, darg);
+-      if (err)
++      if (err) {
++              if (darg->async_done)
++                      goto exit_free_skb;
+               goto exit_free_pages;
++      }
+       darg->skb = clear_skb ?: tls_strp_msg(ctx);
+       clear_skb = NULL;
+@@ -1694,6 +1699,9 @@ static int tls_decrypt_sg(struct sock *s
+               return err;
+       }
++      if (unlikely(darg->async_done))
++              return 0;
++
+       if (prot->tail_size)
+               darg->tail = dctx->tail;
diff --git a/queue-6.1/x86-cpu-enable-stibp-on-amd-if-automatic-ibrs-is-enabled.patch b/queue-6.1/x86-cpu-enable-stibp-on-amd-if-automatic-ibrs-is-enabled.patch
new file mode 100644 (file)
index 0000000..e259019
--- /dev/null
@@ -0,0 +1,91 @@
+From fd470a8beed88440b160d690344fbae05a0b9b1b Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Thu, 20 Jul 2023 14:47:27 -0500
+Subject: x86/cpu: Enable STIBP on AMD if Automatic IBRS is enabled
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit fd470a8beed88440b160d690344fbae05a0b9b1b upstream.
+
+Unlike Intel's Enhanced IBRS feature, AMD's Automatic IBRS does not
+provide protection to processes running at CPL3/user mode, see section
+"Extended Feature Enable Register (EFER)" in the APM v2 at
+https://bugzilla.kernel.org/attachment.cgi?id=304652
+
+Explicitly enable STIBP to protect against cross-thread CPL3
+branch target injections on systems with Automatic IBRS enabled.
+
+Also update the relevant documentation.
+
+Fixes: e7862eda309e ("x86/cpu: Support AMD Automatic IBRS")
+Reported-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230720194727.67022-1-kim.phillips@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/spectre.rst |   11 +++++++----
+ arch/x86/kernel/cpu/bugs.c                    |   15 +++++++++------
+ 2 files changed, 16 insertions(+), 10 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -484,11 +484,14 @@ Spectre variant 2
+    Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
+    boot, by setting the IBRS bit, and they're automatically protected against
+-   Spectre v2 variant attacks, including cross-thread branch target injections
+-   on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
++   Spectre v2 variant attacks.
+-   Legacy IBRS systems clear the IBRS bit on exit to userspace and
+-   therefore explicitly enable STIBP for that
++   On Intel's enhanced IBRS systems, this includes cross-thread branch target
++   injections on SMT systems (STIBP). In other words, Intel eIBRS enables
++   STIBP, too.
++
++   AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
++   the IBRS bit on exit to userspace, therefore both explicitly enable STIBP.
+    The retpoline mitigation is turned on by default on vulnerable
+    CPUs. It can be forced on or off by the administrator
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1354,19 +1354,21 @@ spectre_v2_user_select_mitigation(void)
+       }
+       /*
+-       * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
++       * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
+        * is not required.
+        *
+-       * Enhanced IBRS also protects against cross-thread branch target
++       * Intel's Enhanced IBRS also protects against cross-thread branch target
+        * injection in user-mode as the IBRS bit remains always set which
+        * implicitly enables cross-thread protections.  However, in legacy IBRS
+        * mode, the IBRS bit is set only on kernel entry and cleared on return
+-       * to userspace. This disables the implicit cross-thread protection,
+-       * so allow for STIBP to be selected in that case.
++       * to userspace.  AMD Automatic IBRS also does not protect userspace.
++       * These modes therefore disable the implicit cross-thread protection,
++       * so allow for STIBP to be selected in those cases.
+        */
+       if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+           !smt_possible ||
+-          spectre_v2_in_eibrs_mode(spectre_v2_enabled))
++          (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
++           !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
+               return;
+       /*
+@@ -2666,7 +2668,8 @@ static ssize_t rfds_show_state(char *buf
+ static char *stibp_state(void)
+ {
+-      if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
++      if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
++          !boot_cpu_has(X86_FEATURE_AUTOIBRS))
+               return "";
+       switch (spectre_v2_user_stibp) {