From: Greg Kroah-Hartman
Date: Mon, 15 Apr 2024 13:15:04 +0000 (+0200)
Subject: 5.15-stable patches
X-Git-Tag: v5.15.156~18
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=e7f634751fa4a41dc96d3a88f59a0c5630f491a5;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch
	x86-bugs-fix-bhi-documentation.patch
	x86-bugs-fix-return-type-of-spectre_bhi_state.patch
	x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch
	x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch
---

diff --git a/queue-5.15/series b/queue-5.15/series
index 8f418a1c047..ae5660b60a5 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -34,3 +34,8 @@ x86-cpu-actually-turn-off-mitigations-by-default-for-speculation_mitigations-n.p
 selftests-timers-fix-abs-warning-in-posix_timers-test.patch
 x86-apic-force-native_apic_mem_read-to-use-the-mov-instruction.patch
 irqflags-explicitly-ignore-lockdep_hrtimer_exit-argument.patch
+x86-bugs-fix-return-type-of-spectre_bhi_state.patch
+x86-bugs-fix-bhi-documentation.patch
+x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch
+x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch
+x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch
diff --git a/queue-5.15/x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch b/queue-5.15/x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch
new file mode 100644
index 00000000000..9402ec262c9
--- /dev/null
+++ b/queue-5.15/x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch
@@ -0,0 +1,133 @@
+From cb2db5bb04d7f778fbc1a1ea2507aab436f1bff3 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf
+Date: Wed, 10 Apr 2024 22:40:46 -0700
+Subject: x86/bugs: Cache the value of MSR_IA32_ARCH_CAPABILITIES
+
+From: Josh Poimboeuf
+
+commit cb2db5bb04d7f778fbc1a1ea2507aab436f1bff3 upstream.
+
+There's no need to keep reading MSR_IA32_ARCH_CAPABILITIES over and
+over. It's even read in the BHI sysfs function, which is a big no-no.
+Just read it once and cache it.
+
+Fixes: ec9404e40e8f ("x86/bhi: Add BHI mitigation knob")
+Signed-off-by: Josh Poimboeuf
+Signed-off-by: Ingo Molnar
+Reviewed-by: Nikolay Borisov
+Cc: Linus Torvalds
+Cc: Sean Christopherson
+Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/cpu/bugs.c | 22 +++++++---------------
+ 1 file changed, 7 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -60,6 +60,8 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current)
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+ 
++static u64 __ro_after_init ia32_cap;
++
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+ 
+ void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+@@ -143,6 +145,8 @@ void __init cpu_select_mitigations(void)
+ 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ 	}
+ 
++	ia32_cap = x86_read_arch_cap_msr();
++
+ 	/* Select the proper CPU mitigations before patching alternatives: */
+ 	spectre_v1_select_mitigation();
+ 	spectre_v2_select_mitigation();
+@@ -307,8 +311,6 @@ static const char * const taa_strings[]
+ 
+ static void __init taa_select_mitigation(void)
+ {
+-	u64 ia32_cap;
+-
+ 	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ 		taa_mitigation = TAA_MITIGATION_OFF;
+ 		return;
+@@ -347,7 +349,6 @@ static void __init taa_select_mitigation
+ 	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ 	 * update is required.
+ 	 */
+-	ia32_cap = x86_read_arch_cap_msr();
+ 	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+ 	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+@@ -407,8 +408,6 @@ static const char * const mmio_strings[]
+ 
+ static void __init mmio_select_mitigation(void)
+ {
+-	u64 ia32_cap;
+-
+ 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+ 	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+ 	     cpu_mitigations_off()) {
+@@ -419,8 +418,6 @@ static void __init mmio_select_mitigatio
+ 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ 		return;
+ 
+-	ia32_cap = x86_read_arch_cap_msr();
+-
+ 	/*
+ 	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
+ 	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+@@ -514,7 +511,7 @@ static void __init rfds_select_mitigatio
+ 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
+ 		return;
+ 
+-	if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
++	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+ 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ 	else
+ 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -658,8 +655,6 @@ void update_srbds_msr(void)
+ 
+ static void __init srbds_select_mitigation(void)
+ {
+-	u64 ia32_cap;
+-
+ 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ 		return;
+ 
+@@ -668,7 +663,6 @@ static void __init srbds_select_mitigati
+ 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+ 	 * by Processor MMIO Stale Data vulnerability.
+ 	 */
+-	ia32_cap = x86_read_arch_cap_msr();
+ 	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+@@ -812,7 +806,7 @@ static void __init gds_select_mitigation
+ 	/* Will verify below that mitigation _can_ be disabled */
+ 
+ 	/* No microcode */
+-	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++	if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
+ 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ 			/*
+ 			 * This only needs to be done on the boot CPU so do it
+@@ -1884,8 +1878,6 @@ static void update_indir_branch_cond(voi
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+-	u64 ia32_cap = x86_read_arch_cap_msr();
+-
+ 	/*
+ 	 * Enable the idle clearing if SMT is active on CPUs which are
+ 	 * affected only by MSBDS and not any other MDS variant.
+@@ -2797,7 +2789,7 @@ static const char *spectre_bhi_state(voi
+ 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ 		return "; BHI: SW loop, KVM: SW loop";
+ 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+-		 !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
++		 !(ia32_cap & ARCH_CAP_RRSBA))
+ 		return "; BHI: Retpoline";
+ 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ 		return "; BHI: Syscall hardening, KVM: SW loop";
diff --git a/queue-5.15/x86-bugs-fix-bhi-documentation.patch b/queue-5.15/x86-bugs-fix-bhi-documentation.patch
new file mode 100644
index 00000000000..a3bba16990d
--- /dev/null
+++ b/queue-5.15/x86-bugs-fix-bhi-documentation.patch
@@ -0,0 +1,88 @@
+From dfe648903f42296866d79f10d03f8c85c9dfba30 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf
+Date: Wed, 10 Apr 2024 22:40:45 -0700
+Subject: x86/bugs: Fix BHI documentation
+
+From: Josh Poimboeuf
+
+commit dfe648903f42296866d79f10d03f8c85c9dfba30 upstream.
+
+Fix up some inaccuracies in the BHI documentation.
+
+Fixes: ec9404e40e8f ("x86/bhi: Add BHI mitigation knob")
+Signed-off-by: Josh Poimboeuf
+Signed-off-by: Ingo Molnar
+Reviewed-by: Nikolay Borisov
+Cc: Linus Torvalds
+Cc: Sean Christopherson
+Link: https://lore.kernel.org/r/8c84f7451bfe0dd08543c6082a383f390d4aa7e2.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ Documentation/admin-guide/hw-vuln/spectre.rst   | 15 ++++++++-------
+ Documentation/admin-guide/kernel-parameters.txt | 12 +++++++-----
+ 2 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -439,11 +439,11 @@ The possible values in this file are:
+      - System is protected by retpoline
+    * - BHI: BHI_DIS_S
+      - System is protected by BHI_DIS_S
+-   * - BHI: SW loop; KVM SW loop
++   * - BHI: SW loop, KVM SW loop
+      - System is protected by software clearing sequence
+    * - BHI: Syscall hardening
+      - Syscalls are hardened against BHI
+-   * - BHI: Syscall hardening; KVM: SW loop
++   * - BHI: Syscall hardening, KVM: SW loop
+      - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
+ 
+ Full mitigation might require a microcode update from the CPU
+@@ -716,13 +716,14 @@ For user space mitigation:
+ 	of the HW BHI control and the SW BHB clearing sequence.
+ 
+ 	on
+-		unconditionally enable.
++		(default) Enable the HW or SW mitigation as
++		needed.
+ 	off
+-		unconditionally disable.
++		Disable the mitigation.
+ 	auto
+-		enable if hardware mitigation
+-		control(BHI_DIS_S) is available, otherwise
+-		enable alternate mitigation in KVM.
++		Enable the HW mitigation if needed, but
++		*don't* enable the SW mitigation except for KVM.
++		The system may be vulnerable.
+ 
+ For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
+ 
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3093,6 +3093,7 @@
+ 			       reg_file_data_sampling=off [X86]
+ 			       retbleed=off [X86]
+ 			       spec_store_bypass_disable=off [X86,PPC]
++			       spectre_bhi=off [X86]
+ 			       spectre_v2_user=off [X86]
+ 			       ssbd=force-off [ARM64]
+ 			       tsx_async_abort=off [X86]
+@@ -5410,11 +5411,12 @@
+ 			deployment of the HW BHI control and the SW BHB
+ 			clearing sequence.
+ 
+-			on   - unconditionally enable.
+-			off  - unconditionally disable.
+-			auto - (default) enable hardware mitigation
+-			       (BHI_DIS_S) if available, otherwise enable
+-			       alternate mitigation in KVM.
++			on   - (default) Enable the HW or SW mitigation
++			       as needed.
++			off  - Disable the mitigation.
++			auto - Enable the HW mitigation if needed, but
++			       *don't* enable the SW mitigation except
++			       for KVM. The system may be vulnerable.
+ 
+ 	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+ 			(indirect branch speculation) vulnerability.
diff --git a/queue-5.15/x86-bugs-fix-return-type-of-spectre_bhi_state.patch b/queue-5.15/x86-bugs-fix-return-type-of-spectre_bhi_state.patch
new file mode 100644
index 00000000000..065f08229ee
--- /dev/null
+++ b/queue-5.15/x86-bugs-fix-return-type-of-spectre_bhi_state.patch
@@ -0,0 +1,39 @@
+From 04f4230e2f86a4e961ea5466eda3db8c1762004d Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon
+Date: Tue, 9 Apr 2024 16:08:05 -0700
+Subject: x86/bugs: Fix return type of spectre_bhi_state()
+
+From: Daniel Sneddon
+
+commit 04f4230e2f86a4e961ea5466eda3db8c1762004d upstream.
+
+The definition of spectre_bhi_state() incorrectly returns a const char
+* const. This causes a compiler warning when building with W=1:
+
+  warning: type qualifiers ignored on function return type [-Wignored-qualifiers]
+  2812 | static const char * const spectre_bhi_state(void)
+
+Remove the const qualifier from the pointer.
+
+Fixes: ec9404e40e8f ("x86/bhi: Add BHI mitigation knob")
+Reported-by: Sean Christopherson
+Signed-off-by: Daniel Sneddon
+Signed-off-by: Ingo Molnar
+Cc: Linus Torvalds
+Link: https://lore.kernel.org/r/20240409230806.1545822-1-daniel.sneddon@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2788,7 +2788,7 @@ static char *pbrsb_eibrs_state(void)
+ 	}
+ }
+ 
+-static const char * const spectre_bhi_state(void)
++static const char *spectre_bhi_state(void)
+ {
+ 	if (!boot_cpu_has_bug(X86_BUG_BHI))
+ 		return "; BHI: Not affected";
diff --git a/queue-5.15/x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch b/queue-5.15/x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch
new file mode 100644
index 00000000000..c3b2437fdbd
--- /dev/null
+++ b/queue-5.15/x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch
@@ -0,0 +1,302 @@
+From d0485730d2189ffe5d986d4e9e191f1e4d5ffd24 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar
+Date: Thu, 11 Apr 2024 09:25:36 +0200
+Subject: x86/bugs: Rename various 'ia32_cap' variables to 'x86_arch_cap_msr'
+
+From: Ingo Molnar
+
+commit d0485730d2189ffe5d986d4e9e191f1e4d5ffd24 upstream.
+
+So we are using the 'ia32_cap' value in a number of places,
+which got its name from the MSR_IA32_ARCH_CAPABILITIES MSR register.
+
+But there's very little 'IA32' about it - this isn't 32-bit only
+code, nor does it originate from there; it's just a historic
+quirk that many Intel MSR names are prefixed with IA32_.
+
+This is already clear from the helper method around the MSR:
+x86_read_arch_cap_msr(), which doesn't have the IA32 prefix.
+
+So rename 'ia32_cap' to 'x86_arch_cap_msr' to be consistent with
+its role and with the naming of the helper function.
+
+Signed-off-by: Ingo Molnar
+Cc: Josh Poimboeuf
+Cc: Nikolay Borisov
+Cc: Linus Torvalds
+Cc: Sean Christopherson
+Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/cpu/bugs.c   | 30 +++++++++++++-------------
+ arch/x86/kernel/cpu/common.c | 48 +++++++++++++++++++++----------------------
+ 2 files changed, 39 insertions(+), 39 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -60,7 +60,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current)
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+ 
+-static u64 __ro_after_init ia32_cap;
++static u64 __ro_after_init x86_arch_cap_msr;
+ 
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+ 
+@@ -145,7 +145,7 @@ void __init cpu_select_mitigations(void)
+ 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ 	}
+ 
+-	ia32_cap = x86_read_arch_cap_msr();
++	x86_arch_cap_msr = x86_read_arch_cap_msr();
+ 
+ 	/* Select the proper CPU mitigations before patching alternatives: */
+ 	spectre_v1_select_mitigation();
+@@ -349,8 +349,8 @@ static void __init taa_select_mitigation
+ 	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ 	 * update is required.
+ 	 */
+-	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+-	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
++	if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
++	    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+ 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+ 
+ 	/*
+@@ -440,7 +440,7 @@ static void __init mmio_select_mitigatio
+ 	 * be propagated to uncore buffers, clearing the Fill buffers on idle
+ 	 * is required irrespective of SMT state.
+ 	 */
+-	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
++	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+ 		static_branch_enable(&mds_idle_clear);
+ 
+ 	/*
+@@ -450,10 +450,10 @@ static void __init mmio_select_mitigatio
+ 	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ 	 * affected systems.
+ 	 */
+-	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
++	if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ 	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ 	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+-	     !(ia32_cap & ARCH_CAP_MDS_NO)))
++	     !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
+ 		mmio_mitigation = MMIO_MITIGATION_VERW;
+ 	else
+ 		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+@@ -511,7 +511,7 @@ static void __init rfds_select_mitigatio
+ 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
+ 		return;
+ 
+-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ 	else
+ 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -663,7 +663,7 @@ static void __init srbds_select_mitigati
+ 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+ 	 * by Processor MMIO Stale Data vulnerability.
+ 	 */
+-	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
++	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+@@ -806,7 +806,7 @@ static void __init gds_select_mitigation
+ 	/* Will verify below that mitigation _can_ be disabled */
+ 
+ 	/* No microcode */
+-	if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
++	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
+ 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ 			/*
+ 			 * This only needs to be done on the boot CPU so do it
+@@ -1518,14 +1518,14 @@ static enum spectre_v2_mitigation __init
+ /* Disable in-kernel use of non-RSB RET predictors */
+ static void __init spec_ctrl_disable_kernel_rrsba(void)
+ {
+-	u64 ia32_cap;
++	u64 x86_arch_cap_msr;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ 		return;
+ 
+-	ia32_cap = x86_read_arch_cap_msr();
++	x86_arch_cap_msr = x86_read_arch_cap_msr();
+ 
+-	if (ia32_cap & ARCH_CAP_RRSBA) {
++	if (x86_arch_cap_msr & ARCH_CAP_RRSBA) {
+ 		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+ 		update_spec_ctrl(x86_spec_ctrl_base);
+ 	}
+@@ -1892,7 +1892,7 @@ static void update_mds_branch_idle(void)
+ 	if (sched_smt_active()) {
+ 		static_branch_enable(&mds_idle_clear);
+ 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+-		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
++		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+ 		static_branch_disable(&mds_idle_clear);
+ 	}
+ }
+@@ -2789,7 +2789,7 @@ static const char *spectre_bhi_state(voi
+ 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ 		return "; BHI: SW loop, KVM: SW loop";
+ 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+-		 !(ia32_cap & ARCH_CAP_RRSBA))
++		 !(x86_arch_cap_msr & ARCH_CAP_RRSBA))
+ 		return "; BHI: Retpoline";
+ 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ 		return "; BHI: Syscall hardening, KVM: SW loop";
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1198,25 +1198,25 @@ static bool __init cpu_matches(const str
+ 
+ u64 x86_read_arch_cap_msr(void)
+ {
+-	u64 ia32_cap = 0;
++	u64 x86_arch_cap_msr = 0;
+ 
+ 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+ 
+-	return ia32_cap;
++	return x86_arch_cap_msr;
+ }
+ 
+-static bool arch_cap_mmio_immune(u64 ia32_cap)
++static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
+ {
+-	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+-		ia32_cap & ARCH_CAP_PSDP_NO &&
+-		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
++	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
++		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
++		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
+ }
+ 
+-static bool __init vulnerable_to_rfds(u64 ia32_cap)
++static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
+ {
+ 	/* The "immunity" bit trumps everything else: */
+-	if (ia32_cap & ARCH_CAP_RFDS_NO)
++	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
+ 		return false;
+ 
+ 	/*
+@@ -1224,7 +1224,7 @@ static bool __init vulnerable_to_rfds(u6
+ 	 * indicate that mitigation is needed because guest is running on a
+ 	 * vulnerable hardware or may migrate to such hardware:
+ 	 */
+-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ 		return true;
+ 
+ 	/* Only consult the blacklist when there is no enumeration: */
+@@ -1233,11 +1233,11 @@ static bool __init vulnerable_to_rfds(u6
+ 
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+-	u64 ia32_cap = x86_read_arch_cap_msr();
++	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+ 
+ 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+ 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+-	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
++	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
+ 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+ 
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
+@@ -1249,7 +1249,7 @@ static void __init cpu_set_bug_bits(stru
+ 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ 
+ 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
++	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
+ 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ 
+@@ -1260,17 +1260,17 @@ static void __init cpu_set_bug_bits(stru
+ 	 * Don't use AutoIBRS when SNP is enabled because it degrades host
+ 	 * userspace indirect branch performance.
+ 	 */
+-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
++	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
+ 	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
+ 	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
+ 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+-		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
++		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
+ 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ 	}
+ 
+ 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+-	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
++	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
+ 		setup_force_cpu_bug(X86_BUG_MDS);
+ 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
+ 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+@@ -1289,9 +1289,9 @@ static void __init cpu_set_bug_bits(stru
+ 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
+ 	 * update is not present or running as guest that don't get TSX_CTRL.
+ 	 */
+-	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
++	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
+ 	    (cpu_has(c, X86_FEATURE_RTM) ||
+-	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
++	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
+ 		setup_force_cpu_bug(X86_BUG_TAA);
+ 
+ 	/*
+@@ -1317,7 +1317,7 @@ static void __init cpu_set_bug_bits(stru
+ 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+ 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ 	 */
+-	if (!arch_cap_mmio_immune(ia32_cap)) {
++	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
+ 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
+ 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+@@ -1325,7 +1325,7 @@ static void __init cpu_set_bug_bits(stru
+ 	}
+ 
+ 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+-		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
++		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
+ 			setup_force_cpu_bug(X86_BUG_RETBLEED);
+ 	}
+ 
+@@ -1338,7 +1338,7 @@ static void __init cpu_set_bug_bits(stru
+ 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+ 	 * which means that AVX will be disabled.
+ 	 */
+-	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
+ 	    boot_cpu_has(X86_FEATURE_AVX))
+ 		setup_force_cpu_bug(X86_BUG_GDS);
+ 
+@@ -1347,11 +1347,11 @@ static void __init cpu_set_bug_bits(stru
+ 		setup_force_cpu_bug(X86_BUG_SRSO);
+ 	}
+ 
+-	if (vulnerable_to_rfds(ia32_cap))
++	if (vulnerable_to_rfds(x86_arch_cap_msr))
+ 		setup_force_cpu_bug(X86_BUG_RFDS);
+ 
+ 	/* When virtualized, eIBRS could be hidden, assume vulnerable */
+-	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
++	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+ 	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+@@ -1361,7 +1361,7 @@ static void __init cpu_set_bug_bits(stru
+ 		return;
+ 
+ 	/* Rogue Data Cache Load? No! */
+-	if (ia32_cap & ARCH_CAP_RDCL_NO)
++	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
+ 		return;
+ 
+ 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
diff --git a/queue-5.15/x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch b/queue-5.15/x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch
new file mode 100644
index 00000000000..445b8ccdca7
--- /dev/null
+++ b/queue-5.15/x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch
@@ -0,0 +1,46 @@
+From acaa4b5c4c854b5009f4d4a5395b2609ad0f4937 Mon Sep 17 00:00:00 2001
+From: Kim Phillips
+Date: Thu, 25 Jan 2024 22:11:02 -0600
+Subject: x86/speculation: Do not enable Automatic IBRS if SEV-SNP is enabled
+
+From: Kim Phillips
+
+commit acaa4b5c4c854b5009f4d4a5395b2609ad0f4937 upstream.
+
+Without SEV-SNP, Automatic IBRS protects only the kernel. But when
+SEV-SNP is enabled, the Automatic IBRS protection umbrella widens to all
+host-side code, including userspace. This protection comes at a cost:
+reduced userspace indirect branch performance.
+
+To avoid this performance loss, don't use Automatic IBRS on SEV-SNP
+hosts and fall back to retpolines instead.
+
+  [ mdr: squash in changes from review discussion. ]
+
+Signed-off-by: Kim Phillips
+Signed-off-by: Michael Roth
+Signed-off-by: Borislav Petkov (AMD)
+Acked-by: Dave Hansen
+Link: https://lore.kernel.org/r/20240126041126.1927228-3-michael.roth@amd.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/cpu/common.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1256,8 +1256,13 @@ static void __init cpu_set_bug_bits(stru
+ 	/*
+ 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+ 	 * flag and protect from vendor-specific bugs via the whitelist.
++	 *
++	 * Don't use AutoIBRS when SNP is enabled because it degrades host
++	 * userspace indirect branch performance.
+ 	 */
+-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
++	if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
++	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
++	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
+ 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+ 		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ 	}
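
---

A note on the common thread of this series: the cache and rename patches boil
down to one refactor in arch/x86/kernel/cpu/bugs.c - read
MSR_IA32_ARCH_CAPABILITIES once during cpu_select_mitigations() and let every
consumer test the cached value, instead of re-reading the MSR in each
*_select_mitigation() helper and in the sysfs reporting path. The sketch below
is a minimal user-space C illustration of that pattern, not kernel code:
read_arch_cap_once(), the ARCH_CAP_RRSBA bit position and the bhi_state()
consumer are stand-ins assumed for the example.

	#include <stdint.h>
	#include <stdio.h>

	#define ARCH_CAP_RRSBA (1ULL << 19)	/* bit position assumed for illustration */

	static uint64_t arch_cap;	/* written once during init, read-only afterwards */

	/* Stand-in for the expensive rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ...) read */
	static uint64_t read_arch_cap_once(void)
	{
		return ARCH_CAP_RRSBA;	/* pretend the CPU enumerates this bit */
	}

	/* Analogue of cpu_select_mitigations(): the single read happens here */
	static void select_mitigations(void)
	{
		arch_cap = read_arch_cap_once();
	}

	/* Analogue of a sysfs handler: tests cached bits, never re-reads the MSR */
	static const char *bhi_state(void)
	{
		return (arch_cap & ARCH_CAP_RRSBA) ? "RRSBA enumerated"
						   : "RRSBA not enumerated";
	}

	int main(void)
	{
		select_mitigations();
		puts(bhi_state());
		return 0;
	}

After the rename patch, the kernel's version of this shape is the file-scope
x86_arch_cap_msr, filled in once via x86_read_arch_cap_msr() from
cpu_select_mitigations() and consumed everywhere else, including
spectre_bhi_state().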