git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Apr 2024 13:15:33 +0000 (15:15 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Apr 2024 13:15:33 +0000 (15:15 +0200)
added patches:
x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch
x86-bugs-fix-return-type-of-spectre_bhi_state.patch
x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch
x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch

queue-6.6/series
queue-6.6/x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch [new file with mode: 0644]
queue-6.6/x86-bugs-fix-return-type-of-spectre_bhi_state.patch [new file with mode: 0644]
queue-6.6/x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch [new file with mode: 0644]
queue-6.6/x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch [new file with mode: 0644]

index 775862eefb15d1ed2d7a52152215449f6a63eae5..3921a8f4f4bb1c62cc092b529a2da478d6aacf60 100644 (file)
@@ -104,4 +104,8 @@ x86-cpu-actually-turn-off-mitigations-by-default-for-speculation_mitigations-n.p
 selftests-timers-fix-abs-warning-in-posix_timers-test.patch
 x86-apic-force-native_apic_mem_read-to-use-the-mov-instruction.patch
 irqflags-explicitly-ignore-lockdep_hrtimer_exit-argument.patch
+x86-bugs-fix-return-type-of-spectre_bhi_state.patch
 x86-bugs-fix-bhi-documentation.patch
+x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch
+x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch
+x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch
diff --git a/queue-6.6/x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch b/queue-6.6/x86-bugs-cache-the-value-of-msr_ia32_arch_capabilities.patch
new file mode 100644 (file)
index 0000000..dc332c7
--- /dev/null
@@ -0,0 +1,133 @@
+From cb2db5bb04d7f778fbc1a1ea2507aab436f1bff3 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+Date: Wed, 10 Apr 2024 22:40:46 -0700
+Subject: x86/bugs: Cache the value of MSR_IA32_ARCH_CAPABILITIES
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+commit cb2db5bb04d7f778fbc1a1ea2507aab436f1bff3 upstream.
+
+There's no need to keep reading MSR_IA32_ARCH_CAPABILITIES over and
+over.  It's even read in the BHI sysfs function, which is a big no-no.
+Just read it once and cache it.
+
+Fixes: ec9404e40e8f ("x86/bhi: Add BHI mitigation knob")
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |   22 +++++++---------------
+ 1 file changed, 7 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -61,6 +61,8 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current)
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
++static u64 __ro_after_init ia32_cap;
++
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
+@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
+               x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+       }
++      ia32_cap = x86_read_arch_cap_msr();
++
+       /* Select the proper CPU mitigations before patching alternatives: */
+       spectre_v1_select_mitigation();
+       spectre_v2_select_mitigation();
+@@ -301,8 +305,6 @@ static const char * const taa_strings[]
+ static void __init taa_select_mitigation(void)
+ {
+-      u64 ia32_cap;
+-
+       if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+               taa_mitigation = TAA_MITIGATION_OFF;
+               return;
+@@ -341,7 +343,6 @@ static void __init taa_select_mitigation
+        * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+        * update is required.
+        */
+-      ia32_cap = x86_read_arch_cap_msr();
+       if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+           !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+               taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+@@ -401,8 +402,6 @@ static const char * const mmio_strings[]
+ static void __init mmio_select_mitigation(void)
+ {
+-      u64 ia32_cap;
+-
+       if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+            boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+            cpu_mitigations_off()) {
+@@ -413,8 +412,6 @@ static void __init mmio_select_mitigatio
+       if (mmio_mitigation == MMIO_MITIGATION_OFF)
+               return;
+-      ia32_cap = x86_read_arch_cap_msr();
+-
+       /*
+        * Enable CPU buffer clear mitigation for host and VMM, if also affected
+        * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+@@ -508,7 +505,7 @@ static void __init rfds_select_mitigatio
+       if (rfds_mitigation == RFDS_MITIGATION_OFF)
+               return;
+-      if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
++      if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+       else
+               rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -659,8 +656,6 @@ void update_srbds_msr(void)
+ static void __init srbds_select_mitigation(void)
+ {
+-      u64 ia32_cap;
+-
+       if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+               return;
+@@ -669,7 +664,6 @@ static void __init srbds_select_mitigati
+        * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+        * by Processor MMIO Stale Data vulnerability.
+        */
+-      ia32_cap = x86_read_arch_cap_msr();
+       if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+           !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+               srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+@@ -813,7 +807,7 @@ static void __init gds_select_mitigation
+       /* Will verify below that mitigation _can_ be disabled */
+       /* No microcode */
+-      if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++      if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
+               if (gds_mitigation == GDS_MITIGATION_FORCE) {
+                       /*
+                        * This only needs to be done on the boot CPU so do it
+@@ -1907,8 +1901,6 @@ static void update_indir_branch_cond(voi
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+-      u64 ia32_cap = x86_read_arch_cap_msr();
+-
+       /*
+        * Enable the idle clearing if SMT is active on CPUs which are
+        * affected only by MSBDS and not any other MDS variant.
+@@ -2817,7 +2809,7 @@ static const char *spectre_bhi_state(voi
+       else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+               return "; BHI: SW loop, KVM: SW loop";
+       else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+-               !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
++               !(ia32_cap & ARCH_CAP_RRSBA))
+               return "; BHI: Retpoline";
+       else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+               return "; BHI: Syscall hardening, KVM: SW loop";
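
The pattern the patch above applies, reading MSR_IA32_ARCH_CAPABILITIES once during mitigation selection and having every later consumer (including the sysfs reporting path) use the cached value, can be sketched in plain userspace C. The names read_arch_cap(), cached_arch_cap and CAP_EXAMPLE_BIT below are illustrative stand-ins rather than the kernel's actual symbols, and the expensive rdmsrl() is simulated with a print statement:

#include <stdint.h>
#include <stdio.h>

#define CAP_EXAMPLE_BIT (1ULL << 4)	/* illustrative capability bit, not a real ARCH_CAP_* value */

/* Stand-in for the expensive MSR access; in the kernel this is
 * rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ...) behind x86_read_arch_cap_msr(). */
static uint64_t read_arch_cap(void)
{
	puts("expensive read performed");
	return CAP_EXAMPLE_BIT;
}

/* Read once at init time and never written again; the kernel marks the
 * equivalent variable __ro_after_init. */
static uint64_t cached_arch_cap;

static void select_mitigations(void)
{
	/* Single read, analogous to the new assignment in cpu_select_mitigations(). */
	cached_arch_cap = read_arch_cap();
}

static const char *report_state(void)
{
	/* Later query paths (e.g. sysfs reporting) only look at the cached value. */
	return (cached_arch_cap & CAP_EXAMPLE_BIT) ? "bit set" : "bit clear";
}

int main(void)
{
	select_mitigations();
	printf("%s\n", report_state());
	printf("%s\n", report_state());	/* no further expensive reads */
	return 0;
}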
diff --git a/queue-6.6/x86-bugs-fix-return-type-of-spectre_bhi_state.patch b/queue-6.6/x86-bugs-fix-return-type-of-spectre_bhi_state.patch
new file mode 100644 (file)
index 0000000..7224156
--- /dev/null
@@ -0,0 +1,39 @@
+From 04f4230e2f86a4e961ea5466eda3db8c1762004d Mon Sep 17 00:00:00 2001
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Date: Tue, 9 Apr 2024 16:08:05 -0700
+Subject: x86/bugs: Fix return type of spectre_bhi_state()
+
+From: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+
+commit 04f4230e2f86a4e961ea5466eda3db8c1762004d upstream.
+
+The definition of spectre_bhi_state() incorrectly returns a const char
+* const. This causes a compiler warning when building with W=1:
+
+ warning: type qualifiers ignored on function return type [-Wignored-qualifiers]
+ 2812 | static const char * const spectre_bhi_state(void)
+
+Remove the const qualifier from the pointer.
+
+Fixes: ec9404e40e8f ("x86/bhi: Add BHI mitigation knob")
+Reported-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/20240409230806.1545822-1-daniel.sneddon@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2808,7 +2808,7 @@ static char *pbrsb_eibrs_state(void)
+       }
+ }
+-static const char * const spectre_bhi_state(void)
++static const char *spectre_bhi_state(void)
+ {
+       if (!boot_cpu_has_bug(X86_BUG_BHI))
+               return "; BHI: Not affected";
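
The warning the patch above fixes is easy to reproduce outside the kernel. A minimal sketch, compiled with gcc -Wignored-qualifiers (which the kernel's W=1 builds enable), shows why the top-level const on the returned pointer is dropped and how removing it silences the diagnostic; the function names are illustrative only:

/* Build with: gcc -Wignored-qualifiers -o qual qual.c */
#include <stdio.h>

/* Top-level qualifiers on a return type are meaningless in C, so the
 * compiler ignores the trailing const and, with -Wignored-qualifiers,
 * emits:
 *   warning: type qualifiers ignored on function return type
 */
static const char * const bad_state(void)
{
	return "; BHI: Not affected";
}

/* Fixed form, matching the patch: the pointee stays const, the pointer
 * itself carries no qualifier on the return type. */
static const char *good_state(void)
{
	return "; BHI: Not affected";
}

int main(void)
{
	printf("%s\n", bad_state());
	printf("%s\n", good_state());
	return 0;
}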
diff --git a/queue-6.6/x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch b/queue-6.6/x86-bugs-rename-various-ia32_cap-variables-to-x86_arch_cap_msr.patch
new file mode 100644 (file)
index 0000000..a2d1444
--- /dev/null
@@ -0,0 +1,316 @@
+From d0485730d2189ffe5d986d4e9e191f1e4d5ffd24 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Thu, 11 Apr 2024 09:25:36 +0200
+Subject: x86/bugs: Rename various 'ia32_cap' variables to 'x86_arch_cap_msr'
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit d0485730d2189ffe5d986d4e9e191f1e4d5ffd24 upstream.
+
+So we are using the 'ia32_cap' value in a number of places,
+which got its name from the MSR_IA32_ARCH_CAPABILITIES MSR register.
+
+But there's very little 'IA32' about it - this isn't 32-bit only
+code, nor does it originate from there; it's just a historic
+quirk that many Intel MSR names are prefixed with IA32_.
+
+This is already clear from the helper method around the MSR:
+x86_read_arch_cap_msr(), which doesn't have the IA32 prefix.
+
+So rename 'ia32_cap' to 'x86_arch_cap_msr' to be consistent with
+its role and with the naming of the helper function.
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Nikolay Borisov <nik.borisov@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/apic/apic.c  |    6 ++---
+ arch/x86/kernel/cpu/bugs.c   |   30 +++++++++++++-------------
+ arch/x86/kernel/cpu/common.c |   48 +++++++++++++++++++++----------------------
+ 3 files changed, 42 insertions(+), 42 deletions(-)
+
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1724,11 +1724,11 @@ static int x2apic_state;
+ static bool x2apic_hw_locked(void)
+ {
+-      u64 ia32_cap;
++      u64 x86_arch_cap_msr;
+       u64 msr;
+-      ia32_cap = x86_read_arch_cap_msr();
+-      if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
++      x86_arch_cap_msr = x86_read_arch_cap_msr();
++      if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
+               rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
+               return (msr & LEGACY_XAPIC_DISABLED);
+       }
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current)
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+-static u64 __ro_after_init ia32_cap;
++static u64 __ro_after_init x86_arch_cap_msr;
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+@@ -146,7 +146,7 @@ void __init cpu_select_mitigations(void)
+               x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+       }
+-      ia32_cap = x86_read_arch_cap_msr();
++      x86_arch_cap_msr = x86_read_arch_cap_msr();
+       /* Select the proper CPU mitigations before patching alternatives: */
+       spectre_v1_select_mitigation();
+@@ -343,8 +343,8 @@ static void __init taa_select_mitigation
+        * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+        * update is required.
+        */
+-      if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+-          !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
++      if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
++          !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+               taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+       /*
+@@ -434,7 +434,7 @@ static void __init mmio_select_mitigatio
+        * be propagated to uncore buffers, clearing the Fill buffers on idle
+        * is required irrespective of SMT state.
+        */
+-      if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
++      if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+               static_branch_enable(&mds_idle_clear);
+       /*
+@@ -444,10 +444,10 @@ static void __init mmio_select_mitigatio
+        * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+        * affected systems.
+        */
+-      if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
++      if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+           (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+            boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+-           !(ia32_cap & ARCH_CAP_MDS_NO)))
++           !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+       else
+               mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+@@ -505,7 +505,7 @@ static void __init rfds_select_mitigatio
+       if (rfds_mitigation == RFDS_MITIGATION_OFF)
+               return;
+-      if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++      if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+       else
+               rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -664,7 +664,7 @@ static void __init srbds_select_mitigati
+        * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+        * by Processor MMIO Stale Data vulnerability.
+        */
+-      if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
++      if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+           !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+               srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+       else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+@@ -807,7 +807,7 @@ static void __init gds_select_mitigation
+       /* Will verify below that mitigation _can_ be disabled */
+       /* No microcode */
+-      if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
++      if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
+               if (gds_mitigation == GDS_MITIGATION_FORCE) {
+                       /*
+                        * This only needs to be done on the boot CPU so do it
+@@ -1540,14 +1540,14 @@ static enum spectre_v2_mitigation __init
+ /* Disable in-kernel use of non-RSB RET predictors */
+ static void __init spec_ctrl_disable_kernel_rrsba(void)
+ {
+-      u64 ia32_cap;
++      u64 x86_arch_cap_msr;
+       if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+               return;
+-      ia32_cap = x86_read_arch_cap_msr();
++      x86_arch_cap_msr = x86_read_arch_cap_msr();
+-      if (ia32_cap & ARCH_CAP_RRSBA) {
++      if (x86_arch_cap_msr & ARCH_CAP_RRSBA) {
+               x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+               update_spec_ctrl(x86_spec_ctrl_base);
+       }
+@@ -1915,7 +1915,7 @@ static void update_mds_branch_idle(void)
+       if (sched_smt_active()) {
+               static_branch_enable(&mds_idle_clear);
+       } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+-                 (ia32_cap & ARCH_CAP_FBSDP_NO)) {
++                 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+               static_branch_disable(&mds_idle_clear);
+       }
+ }
+@@ -2809,7 +2809,7 @@ static const char *spectre_bhi_state(voi
+       else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+               return "; BHI: SW loop, KVM: SW loop";
+       else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+-               !(ia32_cap & ARCH_CAP_RRSBA))
++               !(x86_arch_cap_msr & ARCH_CAP_RRSBA))
+               return "; BHI: Retpoline";
+       else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+               return "; BHI: Syscall hardening, KVM: SW loop";
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1329,25 +1329,25 @@ static bool __init cpu_matches(const str
+ u64 x86_read_arch_cap_msr(void)
+ {
+-      u64 ia32_cap = 0;
++      u64 x86_arch_cap_msr = 0;
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+-              rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++              rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+-      return ia32_cap;
++      return x86_arch_cap_msr;
+ }
+-static bool arch_cap_mmio_immune(u64 ia32_cap)
++static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
+ {
+-      return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+-              ia32_cap & ARCH_CAP_PSDP_NO &&
+-              ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
++      return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
++              x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
++              x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
+ }
+-static bool __init vulnerable_to_rfds(u64 ia32_cap)
++static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
+ {
+       /* The "immunity" bit trumps everything else: */
+-      if (ia32_cap & ARCH_CAP_RFDS_NO)
++      if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
+               return false;
+       /*
+@@ -1355,7 +1355,7 @@ static bool __init vulnerable_to_rfds(u6
+        * indicate that mitigation is needed because guest is running on a
+        * vulnerable hardware or may migrate to such hardware:
+        */
+-      if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++      if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+               return true;
+       /* Only consult the blacklist when there is no enumeration: */
+@@ -1364,11 +1364,11 @@ static bool __init vulnerable_to_rfds(u6
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+-      u64 ia32_cap = x86_read_arch_cap_msr();
++      u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+       /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+       if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+-          !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
++          !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
+               setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+       if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
+@@ -1380,7 +1380,7 @@ static void __init cpu_set_bug_bits(stru
+               setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+       if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+-          !(ia32_cap & ARCH_CAP_SSB_NO) &&
++          !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
+          !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+@@ -1391,17 +1391,17 @@ static void __init cpu_set_bug_bits(stru
+        * Don't use AutoIBRS when SNP is enabled because it degrades host
+        * userspace indirect branch performance.
+        */
+-      if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
++      if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
+           (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
+            !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+               if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+-                  !(ia32_cap & ARCH_CAP_PBRSB_NO))
++                  !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
+                       setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+       }
+       if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+-          !(ia32_cap & ARCH_CAP_MDS_NO)) {
++          !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
+               setup_force_cpu_bug(X86_BUG_MDS);
+               if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
+                       setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+@@ -1420,9 +1420,9 @@ static void __init cpu_set_bug_bits(stru
+        * TSX_CTRL check alone is not sufficient for cases when the microcode
+        * update is not present or running as guest that don't get TSX_CTRL.
+        */
+-      if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
++      if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
+           (cpu_has(c, X86_FEATURE_RTM) ||
+-           (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
++           (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
+               setup_force_cpu_bug(X86_BUG_TAA);
+       /*
+@@ -1448,7 +1448,7 @@ static void __init cpu_set_bug_bits(stru
+        * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+        * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+        */
+-      if (!arch_cap_mmio_immune(ia32_cap)) {
++      if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
+               if (cpu_matches(cpu_vuln_blacklist, MMIO))
+                       setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+               else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+@@ -1456,7 +1456,7 @@ static void __init cpu_set_bug_bits(stru
+       }
+       if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+-              if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
++              if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
+                       setup_force_cpu_bug(X86_BUG_RETBLEED);
+       }
+@@ -1474,15 +1474,15 @@ static void __init cpu_set_bug_bits(stru
+        * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+        * which means that AVX will be disabled.
+        */
+-      if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++      if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
+           boot_cpu_has(X86_FEATURE_AVX))
+               setup_force_cpu_bug(X86_BUG_GDS);
+-      if (vulnerable_to_rfds(ia32_cap))
++      if (vulnerable_to_rfds(x86_arch_cap_msr))
+               setup_force_cpu_bug(X86_BUG_RFDS);
+       /* When virtualized, eIBRS could be hidden, assume vulnerable */
+-      if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
++      if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+           !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+           (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+            boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+@@ -1492,7 +1492,7 @@ static void __init cpu_set_bug_bits(stru
+               return;
+       /* Rogue Data Cache Load? No! */
+-      if (ia32_cap & ARCH_CAP_RDCL_NO)
++      if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
+               return;
+       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
diff --git a/queue-6.6/x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch b/queue-6.6/x86-speculation-do-not-enable-automatic-ibrs-if-sev-snp-is-enabled.patch
new file mode 100644 (file)
index 0000000..fff07dc
--- /dev/null
@@ -0,0 +1,46 @@
+From acaa4b5c4c854b5009f4d4a5395b2609ad0f4937 Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Thu, 25 Jan 2024 22:11:02 -0600
+Subject: x86/speculation: Do not enable Automatic IBRS if SEV-SNP is enabled
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit acaa4b5c4c854b5009f4d4a5395b2609ad0f4937 upstream.
+
+Without SEV-SNP, Automatic IBRS protects only the kernel. But when
+SEV-SNP is enabled, the Automatic IBRS protection umbrella widens to all
+host-side code, including userspace. This protection comes at a cost:
+reduced userspace indirect branch performance.
+
+To avoid this performance loss, don't use Automatic IBRS on SEV-SNP
+hosts and fall back to retpolines instead.
+
+  [ mdr: squash in changes from review discussion. ]
+
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Michael Roth <michael.roth@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Link: https://lore.kernel.org/r/20240126041126.1927228-3-michael.roth@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1387,8 +1387,13 @@ static void __init cpu_set_bug_bits(stru
+       /*
+        * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+        * flag and protect from vendor-specific bugs via the whitelist.
++       *
++       * Don't use AutoIBRS when SNP is enabled because it degrades host
++       * userspace indirect branch performance.
+        */
+-      if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
++      if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
++          (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
++           !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+               if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+                   !(ia32_cap & ARCH_CAP_PBRSB_NO))
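
Reduced to plain C, the selection logic after this last patch enables the enhanced-IBRS feature flag either when the ARCH_CAP_IBRS_ALL bit is set or when AutoIBRS is present and SEV-SNP is not enabled; otherwise the kernel falls back to retpolines. A minimal sketch with boolean stand-ins for cpu_has() and cpu_feature_enabled() (hypothetical helper names, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_IBRS_ALL (1ULL << 1)	/* bit value as defined in the kernel's msr-index.h */

/* Boolean stand-ins for cpu_has(c, X86_FEATURE_AUTOIBRS) and
 * cpu_feature_enabled(X86_FEATURE_SEV_SNP); illustrative only. */
static bool use_enhanced_ibrs(uint64_t arch_cap, bool has_autoibrs, bool snp_enabled)
{
	/* Intel eIBRS is always taken; AMD AutoIBRS only while SEV-SNP is off,
	 * because with SNP enabled AutoIBRS also covers host userspace and
	 * costs indirect branch performance there. */
	return (arch_cap & ARCH_CAP_IBRS_ALL) ||
	       (has_autoibrs && !snp_enabled);
}

int main(void)
{
	printf("AutoIBRS host, SNP off: %d\n", use_enhanced_ibrs(0, true, false));	/* 1: eIBRS-style protection */
	printf("AutoIBRS host, SNP on:  %d\n", use_enhanced_ibrs(0, true, true));	/* 0: fall back to retpolines */
	return 0;
}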