From 6bdfb47828e83f54590f214d6fe6a5cca1ca455f Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Fri, 18 Oct 2024 16:46:51 +0200
Subject: [PATCH] 6.11-stable patches

added patches:
	iommu-vt-d-fix-incorrect-pci_for_each_dma_alias-for-non-pci-devices.patch
	kvm-s390-change-virtual-to-physical-address-access-in-diag-0x258-handler.patch
	kvm-s390-gaccess-check-if-guest-address-is-in-memslot.patch
	s390-sclp-deactivate-sclp-after-all-its-users.patch
	s390-sclp_vt220-convert-newlines-to-crlf-instead-of-lfcr.patch
	x86-bugs-do-not-use-untrain_ret-with-ibpb-on-entry.patch
	x86-bugs-skip-rsb-fill-at-vmexit.patch
	x86-cpufeatures-add-a-ibpb_no_ret-bug-flag.patch
	x86-cpufeatures-define-x86_feature_amd_ibpb_ret.patch
	x86-entry-have-entry_ibpb-invalidate-return-predictions.patch
---
 ...r_each_dma_alias-for-non-pci-devices.patch |  43 +++++++
 ...address-access-in-diag-0x258-handler.patch |  42 +++++++
 ...check-if-guest-address-is-in-memslot.patch | 118 ++++++++++++++++++
 ...-deactivate-sclp-after-all-its-users.patch |  45 +++++++
 ...ert-newlines-to-crlf-instead-of-lfcr.patch |  55 ++++++++
 queue-6.11/series                             |  10 ++
 ...t-use-untrain_ret-with-ibpb-on-entry.patch |  58 +++++++++
 .../x86-bugs-skip-rsb-fill-at-vmexit.patch    |  57 +++++++++
 ...ufeatures-add-a-ibpb_no_ret-bug-flag.patch |  44 +++++++
 ...ures-define-x86_feature_amd_ibpb_ret.patch |  46 +++++++
 ...y_ibpb-invalidate-return-predictions.patch |  47 +++++++
 11 files changed, 565 insertions(+)
 create mode 100644 queue-6.11/iommu-vt-d-fix-incorrect-pci_for_each_dma_alias-for-non-pci-devices.patch
 create mode 100644 queue-6.11/kvm-s390-change-virtual-to-physical-address-access-in-diag-0x258-handler.patch
 create mode 100644 queue-6.11/kvm-s390-gaccess-check-if-guest-address-is-in-memslot.patch
 create mode 100644 queue-6.11/s390-sclp-deactivate-sclp-after-all-its-users.patch
 create mode 100644 queue-6.11/s390-sclp_vt220-convert-newlines-to-crlf-instead-of-lfcr.patch
 create mode 100644 queue-6.11/x86-bugs-do-not-use-untrain_ret-with-ibpb-on-entry.patch
 create mode 100644 queue-6.11/x86-bugs-skip-rsb-fill-at-vmexit.patch
 create mode 100644 queue-6.11/x86-cpufeatures-add-a-ibpb_no_ret-bug-flag.patch
 create mode 100644 queue-6.11/x86-cpufeatures-define-x86_feature_amd_ibpb_ret.patch
 create mode 100644 queue-6.11/x86-entry-have-entry_ibpb-invalidate-return-predictions.patch

diff --git a/queue-6.11/iommu-vt-d-fix-incorrect-pci_for_each_dma_alias-for-non-pci-devices.patch b/queue-6.11/iommu-vt-d-fix-incorrect-pci_for_each_dma_alias-for-non-pci-devices.patch
new file mode 100644
index 00000000000..7d904eefc27
--- /dev/null
+++ b/queue-6.11/iommu-vt-d-fix-incorrect-pci_for_each_dma_alias-for-non-pci-devices.patch
@@ -0,0 +1,43 @@
+From 6e02a277f1db24fa039e23783c8921c7b0e5b1b3 Mon Sep 17 00:00:00 2001
+From: Lu Baolu
+Date: Mon, 14 Oct 2024 09:37:44 +0800
+Subject: iommu/vt-d: Fix incorrect pci_for_each_dma_alias() for non-PCI devices
+
+From: Lu Baolu
+
+commit 6e02a277f1db24fa039e23783c8921c7b0e5b1b3 upstream.
+
+Previously, the domain_context_clear() function incorrectly called
+pci_for_each_dma_alias() to set up context entries for non-PCI devices.
+This could lead to kernel hangs or other unexpected behavior.
+
+Add a check to only call pci_for_each_dma_alias() for PCI devices. For
+non-PCI devices, domain_context_clear_one() is called directly.
+
+Reported-by: Todd Brandt
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219363
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219349
+Fixes: 9a16ab9d6402 ("iommu/vt-d: Make context clearing consistent with context mapping")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lu Baolu
+Link: https://lore.kernel.org/r/20241014013744.102197-2-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/iommu/intel/iommu.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3520,8 +3520,10 @@ static int domain_context_clear_one_cb(s
+  */
+ static void domain_context_clear(struct device_domain_info *info)
+ {
+-	if (!dev_is_pci(info->dev))
++	if (!dev_is_pci(info->dev)) {
+ 		domain_context_clear_one(info, info->bus, info->devfn);
++		return;
++	}
+
+ 	pci_for_each_dma_alias(to_pci_dev(info->dev),
+ 			       &domain_context_clear_one_cb, info);
diff --git a/queue-6.11/kvm-s390-change-virtual-to-physical-address-access-in-diag-0x258-handler.patch b/queue-6.11/kvm-s390-change-virtual-to-physical-address-access-in-diag-0x258-handler.patch
new file mode 100644
index 00000000000..a4eb2ffe29a
--- /dev/null
+++ b/queue-6.11/kvm-s390-change-virtual-to-physical-address-access-in-diag-0x258-handler.patch
@@ -0,0 +1,42 @@
+From cad4b3d4ab1f062708fff33f44d246853f51e966 Mon Sep 17 00:00:00 2001
+From: Michael Mueller
+Date: Tue, 17 Sep 2024 17:18:34 +0200
+Subject: KVM: s390: Change virtual to physical address access in diag 0x258 handler
+
+From: Michael Mueller
+
+commit cad4b3d4ab1f062708fff33f44d246853f51e966 upstream.
+
+The parameters for the diag 0x258 are real addresses, not virtual, but
+KVM was using them as virtual addresses. This only happened to work, since
+the Linux kernel as a guest used to have a 1:1 mapping for physical vs
+virtual addresses.
+
+Fix KVM so that it correctly uses the addresses as real addresses.
+
+Cc: stable@vger.kernel.org
+Fixes: 8ae04b8f500b ("KVM: s390: Guest's memory access functions get access registers")
+Suggested-by: Vasily Gorbik
+Signed-off-by: Michael Mueller
+Signed-off-by: Nico Boehr
+Reviewed-by: Christian Borntraeger
+Reviewed-by: Heiko Carstens
+Link: https://lore.kernel.org/r/20240917151904.74314-3-nrb@linux.ibm.com
+Acked-by: Janosch Frank
+Signed-off-by: Heiko Carstens
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/s390/kvm/diag.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/diag.c
++++ b/arch/s390/kvm/diag.c
+@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struc
+ 	vcpu->stat.instruction_diagnose_258++;
+ 	if (vcpu->run->s.regs.gprs[rx] & 7)
+ 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+-	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
++	rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+ 	if (rc)
+ 		return kvm_s390_inject_prog_cond(vcpu, rc);
+ 	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
diff --git a/queue-6.11/kvm-s390-gaccess-check-if-guest-address-is-in-memslot.patch b/queue-6.11/kvm-s390-gaccess-check-if-guest-address-is-in-memslot.patch
new file mode 100644
index 00000000000..7693907c61c
--- /dev/null
+++ b/queue-6.11/kvm-s390-gaccess-check-if-guest-address-is-in-memslot.patch
@@ -0,0 +1,118 @@
+From e8061f06185be0a06a73760d6526b8b0feadfe52 Mon Sep 17 00:00:00 2001
+From: Nico Boehr
+Date: Tue, 17 Sep 2024 17:18:33 +0200
+Subject: KVM: s390: gaccess: Check if guest address is in memslot
+
+From: Nico Boehr
+
+commit e8061f06185be0a06a73760d6526b8b0feadfe52 upstream.
+
+Previously, access_guest_page() did not check whether the given guest
+address is inside of a memslot. This is not a problem, since
+kvm_write_guest_page/kvm_read_guest_page return -EFAULT in this case.
+
+However, -EFAULT is also returned when copy_to/from_user fails.
+
+When emulating a guest instruction, the address being outside a memslot
+usually means that an addressing exception should be injected into the
+guest.
+
+Failure in copy_to/from_user however indicates that something is wrong
+in userspace and hence should be handled there.
+
+To be able to distinguish these two cases, return PGM_ADDRESSING in
+access_guest_page() when the guest address is outside guest memory. In
+access_guest_real(), populate vcpu->arch.pgm.code such that
+kvm_s390_inject_prog_cond() can be used in the caller for injecting into
+the guest (if applicable).
+
+Since this adds a new return value to access_guest_page(), we need to make
+sure that other callers are not confused by the new positive return value.
+
+There are the following users of access_guest_page():
+- access_guest_with_key() does the checking itself (in
+  guest_range_to_gpas()), so this case should never happen. Even if, the
+  handling is set up properly.
+- access_guest_real() just passes the return code to its callers, which
+  are:
+  - read_guest_real() - see below
+  - write_guest_real() - see below
+
+There are the following users of read_guest_real():
+- ar_translation() in gaccess.c which already returns PGM_*
+- setup_apcb10(), setup_apcb00(), setup_apcb11() in vsie.c which always
+  return -EFAULT on read_guest_read() nonzero return - no change
+- shadow_crycb(), handle_stfle() always present this as validity, this
+  could be handled better but doesn't change current behaviour - no change
+
+There are the following users of write_guest_real():
+- kvm_s390_store_status_unloaded() always returns -EFAULT on
+  write_guest_real() failure.
+
+Fixes: 2293897805c2 ("KVM: s390: add architecture compliant guest access functions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Nico Boehr
+Reviewed-by: Heiko Carstens
+Link: https://lore.kernel.org/r/20240917151904.74314-2-nrb@linux.ibm.com
+Acked-by: Janosch Frank
+Signed-off-by: Heiko Carstens
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/s390/kvm/gaccess.c |    4 ++++
+ arch/s390/kvm/gaccess.h |   14 ++++++++------
+ 2 files changed, 12 insertions(+), 6 deletions(-)
+
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -828,6 +828,8 @@ static int access_guest_page(struct kvm
+ 	const gfn_t gfn = gpa_to_gfn(gpa);
+ 	int rc;
+
++	if (!gfn_to_memslot(kvm, gfn))
++		return PGM_ADDRESSING;
+ 	if (mode == GACC_STORE)
+ 		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
+ 	else
+@@ -985,6 +987,8 @@ int access_guest_real(struct kvm_vcpu *v
+ 		gra += fragment_len;
+ 		data += fragment_len;
+ 	}
++	if (rc > 0)
++		vcpu->arch.pgm.code = rc;
+ 	return rc;
+ }
+
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -405,11 +405,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu
+  * @len: number of bytes to copy
+  *
+  * Copy @len bytes from @data (kernel space) to @gra (guest real address).
+- * It is up to the caller to ensure that the entire guest memory range is
+- * valid memory before calling this function.
+  * Guest low address and key protection are not checked.
+  *
+- * Returns zero on success or -EFAULT on error.
++ * Returns zero on success, -EFAULT when copying from @data failed, or
++ * PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
++ * is also stored to allow injecting into the guest (if applicable) using
++ * kvm_s390_inject_prog_cond().
+  *
+  * If an error occurs data may have been copied partially to guest memory.
+  */
+@@ -428,11 +429,12 @@ int write_guest_real(struct kvm_vcpu *vc
+  * @len: number of bytes to copy
+  *
+  * Copy @len bytes from @gra (guest real address) to @data (kernel space).
+- * It is up to the caller to ensure that the entire guest memory range is
+- * valid memory before calling this function.
+  * Guest key protection is not checked.
+  *
+- * Returns zero on success or -EFAULT on error.
++ * Returns zero on success, -EFAULT when copying to @data failed, or
++ * PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
++ * is also stored to allow injecting into the guest (if applicable) using
++ * kvm_s390_inject_prog_cond().
+  *
+  * If an error occurs data may have been copied partially to kernel space.
+  */
diff --git a/queue-6.11/s390-sclp-deactivate-sclp-after-all-its-users.patch b/queue-6.11/s390-sclp-deactivate-sclp-after-all-its-users.patch
new file mode 100644
index 00000000000..f1ddaa99611
--- /dev/null
+++ b/queue-6.11/s390-sclp-deactivate-sclp-after-all-its-users.patch
@@ -0,0 +1,45 @@
+From 0d9dc27df22d9b5c8dc7185c8dddbc14f5468518 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?=
+Date: Mon, 14 Oct 2024 07:50:06 +0200
+Subject: s390/sclp: Deactivate sclp after all its users
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh
+
+commit 0d9dc27df22d9b5c8dc7185c8dddbc14f5468518 upstream.
+
+On reboot the SCLP interface is deactivated through a reboot notifier.
+This happens before other components using SCLP have the chance to run
+their own reboot notifiers.
+Two of those components are the SCLP console and tty drivers which try
+to flush the last outstanding messages.
+At that point the SCLP interface is already unusable and the messages
+are discarded.
+
+Execute sclp_deactivate() as late as possible to avoid this issue.
+
+Fixes: 4ae46db99cd8 ("s390/consoles: improve panic notifiers reliability")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh
+Reviewed-by: Sven Schnelle
+Link: https://lore.kernel.org/r/20241014-s390-kunit-v1-1-941defa765a6@linutronix.de
+Signed-off-by: Heiko Carstens
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/s390/char/sclp.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -1195,7 +1195,8 @@ sclp_reboot_event(struct notifier_block
+ }
+
+ static struct notifier_block sclp_reboot_notifier = {
+-	.notifier_call = sclp_reboot_event
++	.notifier_call = sclp_reboot_event,
++	.priority = INT_MIN,
+ };
+
+ static ssize_t con_pages_show(struct device_driver *dev, char *buf)
diff --git a/queue-6.11/s390-sclp_vt220-convert-newlines-to-crlf-instead-of-lfcr.patch b/queue-6.11/s390-sclp_vt220-convert-newlines-to-crlf-instead-of-lfcr.patch
new file mode 100644
index 00000000000..9a024a32a83
--- /dev/null
+++ b/queue-6.11/s390-sclp_vt220-convert-newlines-to-crlf-instead-of-lfcr.patch
@@ -0,0 +1,55 @@
+From dee3df68ab4b00fff6bdf9fc39541729af37307c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?=
+Date: Mon, 14 Oct 2024 07:50:07 +0200
+Subject: s390/sclp_vt220: Convert newlines to CRLF instead of LFCR
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh
+
+commit dee3df68ab4b00fff6bdf9fc39541729af37307c upstream.
+
+According to the VT220 specification the possible character combinations
+sent on RETURN are only CR or CRLF [0].
+
+    The Return key sends either a CR character (0/13) or a CR
+    character (0/13) and an LF character (0/10), depending on the
+    set/reset state of line feed/new line mode (LNM).
+
+The sclp/vt220 driver however uses LFCR. This can confuse tools, for
+example the kunit runner.
+
+Link: https://vt100.net/docs/vt220-rm/chapter3.html#S3.2
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh
+Reviewed-by: Sven Schnelle
+Link: https://lore.kernel.org/r/20241014-s390-kunit-v1-2-941defa765a6@linutronix.de
+Signed-off-by: Heiko Carstens
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/s390/char/sclp_vt220.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/char/sclp_vt220.c
++++ b/drivers/s390/char/sclp_vt220.c
+@@ -319,7 +319,7 @@ sclp_vt220_add_msg(struct sclp_vt220_req
+ 	buffer = (void *) ((addr_t) sccb + sccb->header.length);
+
+ 	if (convertlf) {
+-		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
++		/* Perform Linefeed conversion (0x0a -> 0x0d 0x0a)*/
+ 		for (from=0, to=0;
+ 		     (from < count) && (to < sclp_vt220_space_left(request));
+ 		     from++) {
+@@ -328,8 +328,8 @@ sclp_vt220_add_msg(struct sclp_vt220_req
+ 			/* Perform conversion */
+ 			if (c == 0x0a) {
+ 				if (to + 1 < sclp_vt220_space_left(request)) {
+-					((unsigned char *) buffer)[to++] = c;
+ 					((unsigned char *) buffer)[to++] = 0x0d;
++					((unsigned char *) buffer)[to++] = c;
+ 				} else
+ 					break;
+
diff --git a/queue-6.11/series b/queue-6.11/series
index 947b987dc20..0fa2655b63a 100644
--- a/queue-6.11/series
+++ b/queue-6.11/series
@@ -33,3 +33,13 @@ tcp-fix-mptcp-dss-corruption-due-to-large-pmtu-xmit.patch
 net-fec-move-fec_ptp_read-to-the-top-of-the-file.patch
 net-fec-remove-duplicated-code.patch
 mptcp-prevent-mpc-handshake-on-port-based-signal-endpoints.patch
+iommu-vt-d-fix-incorrect-pci_for_each_dma_alias-for-non-pci-devices.patch
+s390-sclp-deactivate-sclp-after-all-its-users.patch
+s390-sclp_vt220-convert-newlines-to-crlf-instead-of-lfcr.patch
+kvm-s390-gaccess-check-if-guest-address-is-in-memslot.patch
+kvm-s390-change-virtual-to-physical-address-access-in-diag-0x258-handler.patch
+x86-cpufeatures-define-x86_feature_amd_ibpb_ret.patch
+x86-cpufeatures-add-a-ibpb_no_ret-bug-flag.patch
+x86-entry-have-entry_ibpb-invalidate-return-predictions.patch
+x86-bugs-skip-rsb-fill-at-vmexit.patch
+x86-bugs-do-not-use-untrain_ret-with-ibpb-on-entry.patch
diff --git a/queue-6.11/x86-bugs-do-not-use-untrain_ret-with-ibpb-on-entry.patch b/queue-6.11/x86-bugs-do-not-use-untrain_ret-with-ibpb-on-entry.patch
new file mode 100644
index 00000000000..b853ae0503c
--- /dev/null
+++ b/queue-6.11/x86-bugs-do-not-use-untrain_ret-with-ibpb-on-entry.patch
@@ -0,0 +1,58 @@
+From c62fa117c32bd1abed9304c58e0da6940f8c7fc2 Mon Sep 17 00:00:00 2001
+From: Johannes Wikner
+Date: Tue, 8 Oct 2024 12:58:03 +0200
+Subject: x86/bugs: Do not use UNTRAIN_RET with IBPB on entry
+
+From: Johannes Wikner
+
+commit c62fa117c32bd1abed9304c58e0da6940f8c7fc2 upstream.
+
+Since X86_FEATURE_ENTRY_IBPB will invalidate all harmful predictions
+with IBPB, no software-based untraining of returns is needed anymore.
+Currently, this change affects retbleed and SRSO mitigations so if
+either of the mitigations is doing IBPB and the other one does the
+software sequence, the latter is not needed anymore.
+
+  [ bp: Massage commit message. ]
+
+Suggested-by: Borislav Petkov
+Signed-off-by: Johannes Wikner
+Cc:
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/cpu/bugs.c |   17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1113,6 +1113,15 @@ do_cmd_auto:
+
+ 	case RETBLEED_MITIGATION_IBPB:
+ 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++
++		/*
++		 * IBPB on entry already obviates the need for
++		 * software-based untraining so clear those in case some
++		 * other mitigation like SRSO has selected them.
++		 */
++		setup_clear_cpu_cap(X86_FEATURE_UNRET);
++		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
++
+ 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ 		mitigate_smt = true;
+
+@@ -2629,6 +2638,14 @@ static void __init srso_select_mitigatio
+ 		if (has_microcode) {
+ 			setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ 			srso_mitigation = SRSO_MITIGATION_IBPB;
++
++			/*
++			 * IBPB on entry already obviates the need for
++			 * software-based untraining so clear those in case some
++			 * other mitigation like Retbleed has selected them.
++			 */
++			setup_clear_cpu_cap(X86_FEATURE_UNRET);
++			setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ 		}
+ 	} else {
+ 		pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
diff --git a/queue-6.11/x86-bugs-skip-rsb-fill-at-vmexit.patch b/queue-6.11/x86-bugs-skip-rsb-fill-at-vmexit.patch
new file mode 100644
index 00000000000..aa316ee42aa
--- /dev/null
+++ b/queue-6.11/x86-bugs-skip-rsb-fill-at-vmexit.patch
@@ -0,0 +1,57 @@
+From 0fad2878642ec46225af2054564932745ac5c765 Mon Sep 17 00:00:00 2001
+From: Johannes Wikner
+Date: Tue, 8 Oct 2024 12:36:30 +0200
+Subject: x86/bugs: Skip RSB fill at VMEXIT
+
+From: Johannes Wikner
+
+commit 0fad2878642ec46225af2054564932745ac5c765 upstream.
+
+entry_ibpb() is designed to follow Intel's IBPB specification regardless
+of CPU. This includes invalidating RSB entries.
+
+Hence, if IBPB on VMEXIT has been selected, entry_ibpb() as part of the
+RET untraining in the VMEXIT path will take care of all BTB and RSB
+clearing so there's no need to explicitly fill the RSB anymore.
+
+  [ bp: Massage commit message. ]
+
+Suggested-by: Borislav Petkov
+Signed-off-by: Johannes Wikner
+Cc:
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/cpu/bugs.c |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1115,6 +1115,14 @@ do_cmd_auto:
+ 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ 		mitigate_smt = true;
++
++		/*
++		 * There is no need for RSB filling: entry_ibpb() ensures
++		 * all predictions, including the RSB, are invalidated,
++		 * regardless of IBPB implementation.
++		 */
++		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
++
+ 		break;
+
+ 	case RETBLEED_MITIGATION_STUFF:
+@@ -2632,6 +2640,13 @@ static void __init srso_select_mitigatio
+ 		if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
+ 			setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ 			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
++
++			/*
++			 * There is no need for RSB filling: entry_ibpb() ensures
++			 * all predictions, including the RSB, are invalidated,
++			 * regardless of IBPB implementation.
++			 */
++			setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ 		}
+ 	} else {
+ 		pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
diff --git a/queue-6.11/x86-cpufeatures-add-a-ibpb_no_ret-bug-flag.patch b/queue-6.11/x86-cpufeatures-add-a-ibpb_no_ret-bug-flag.patch
new file mode 100644
index 00000000000..9d511deb6bf
--- /dev/null
+++ b/queue-6.11/x86-cpufeatures-add-a-ibpb_no_ret-bug-flag.patch
@@ -0,0 +1,44 @@
+From 3ea87dfa31a7b0bb0ff1675e67b9e54883013074 Mon Sep 17 00:00:00 2001
+From: Johannes Wikner
+Date: Mon, 23 Sep 2024 20:49:34 +0200
+Subject: x86/cpufeatures: Add a IBPB_NO_RET BUG flag
+
+From: Johannes Wikner
+
+commit 3ea87dfa31a7b0bb0ff1675e67b9e54883013074 upstream.
+
+Set this flag if the CPU has an IBPB implementation that does not
+invalidate return target predictions. Zen generations < 4 do not flush
+the RSB when executing an IBPB and this bug flag denotes that.
+
+  [ bp: Massage. ]
+
+Signed-off-by: Johannes Wikner
+Signed-off-by: Borislav Petkov (AMD)
+Cc:
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/cpufeatures.h |    1 +
+ arch/x86/kernel/cpu/common.c       |    3 +++
+ 2 files changed, 4 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -524,4 +524,5 @@
+ #define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
+ #define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
+ #define X86_BUG_BHI			X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
++#define X86_BUG_IBPB_NO_RET		X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1443,6 +1443,9 @@ static void __init cpu_set_bug_bits(stru
+ 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+ 		setup_force_cpu_bug(X86_BUG_BHI);
+
++	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
++		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+
diff --git a/queue-6.11/x86-cpufeatures-define-x86_feature_amd_ibpb_ret.patch b/queue-6.11/x86-cpufeatures-define-x86_feature_amd_ibpb_ret.patch
new file mode 100644
index 00000000000..7135da275dd
--- /dev/null
+++ b/queue-6.11/x86-cpufeatures-define-x86_feature_amd_ibpb_ret.patch
@@ -0,0 +1,46 @@
+From ff898623af2ed564300752bba83a680a1e4fec8d Mon Sep 17 00:00:00 2001
+From: Jim Mattson
+Date: Fri, 13 Sep 2024 10:32:27 -0700
+Subject: x86/cpufeatures: Define X86_FEATURE_AMD_IBPB_RET
+
+From: Jim Mattson
+
+commit ff898623af2ed564300752bba83a680a1e4fec8d upstream.
+
+AMD's initial implementation of IBPB did not clear the return address
+predictor. Beginning with Zen4, AMD's IBPB *does* clear the return address
+predictor. This behavior is enumerated by CPUID.80000008H:EBX.IBPB_RET[30].
+
+Define X86_FEATURE_AMD_IBPB_RET for use in KVM_GET_SUPPORTED_CPUID,
+when determining cross-vendor capabilities.
+
+Suggested-by: Venkatesh Srinivas
+Signed-off-by: Jim Mattson
+Signed-off-by: Borislav Petkov (AMD)
+Reviewed-by: Tom Lendacky
+Reviewed-by: Thomas Gleixner
+Cc:
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/cpufeatures.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -215,7 +215,7 @@
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
+ #define X86_FEATURE_LS_CFG_SSBD		( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
+ #define X86_FEATURE_IBRS		( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
+-#define X86_FEATURE_IBPB		( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */
++#define X86_FEATURE_IBPB		( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
+ #define X86_FEATURE_STIBP		( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ZEN			( 7*32+28) /* Generic flag for all Zen and newer */
+ #define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* L1TF workaround PTE inversion */
+@@ -348,6 +348,7 @@
+ #define X86_FEATURE_CPPC		(13*32+27) /* "cppc" Collaborative Processor Performance Control */
+ #define X86_FEATURE_AMD_PSFD		(13*32+28) /* Predictive Store Forwarding Disable */
+ #define X86_FEATURE_BTC_NO		(13*32+29) /* Not vulnerable to Branch Type Confusion */
++#define X86_FEATURE_AMD_IBPB_RET	(13*32+30) /* IBPB clears return address predictor */
+ #define X86_FEATURE_BRS			(13*32+31) /* "brs" Branch Sampling available */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
diff --git a/queue-6.11/x86-entry-have-entry_ibpb-invalidate-return-predictions.patch b/queue-6.11/x86-entry-have-entry_ibpb-invalidate-return-predictions.patch
new file mode 100644
index 00000000000..e196870d36d
--- /dev/null
+++ b/queue-6.11/x86-entry-have-entry_ibpb-invalidate-return-predictions.patch
@@ -0,0 +1,47 @@
+From 50e4b3b94090babe8d4bb85c95f0d3e6b07ea86e Mon Sep 17 00:00:00 2001
+From: Johannes Wikner
+Date: Mon, 23 Sep 2024 20:49:36 +0200
+Subject: x86/entry: Have entry_ibpb() invalidate return predictions
+
+From: Johannes Wikner
+
+commit 50e4b3b94090babe8d4bb85c95f0d3e6b07ea86e upstream.
+
+entry_ibpb() should invalidate all indirect predictions, including return
+target predictions. Not all IBPB implementations do this, in which case the
+fallback is RSB filling.
+
+Prevent SRSO-style hijacks of return predictions following IBPB, as the return
+target predictor can be corrupted before the IBPB completes.
+
+  [ bp: Massage. ]
+
+Signed-off-by: Johannes Wikner
+Signed-off-by: Borislav Petkov (AMD)
+Cc:
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/entry/entry.S |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -9,6 +9,8 @@
+ #include
+ #include
+ #include
++#include
++#include
+
+ #include "calling.h"
+
+@@ -19,6 +21,9 @@ SYM_FUNC_START(entry_ibpb)
+ 	movl $PRED_CMD_IBPB, %eax
+ 	xorl %edx, %edx
+ 	wrmsr
++
++	/* Make sure IBPB clears return stack preductions too. */
++	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
+ 	RET
+ SYM_FUNC_END(entry_ibpb)
+ /* For KVM */
-- 
2.47.3