--- /dev/null
+From cb2db5bb04d7f778fbc1a1ea2507aab436f1bff3 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+Date: Wed, 10 Apr 2024 22:40:46 -0700
+Subject: x86/bugs: Cache the value of MSR_IA32_ARCH_CAPABILITIES
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+commit cb2db5bb04d7f778fbc1a1ea2507aab436f1bff3 upstream.
+
+There's no need to keep reading MSR_IA32_ARCH_CAPABILITIES over and
+over. It's even read in the BHI sysfs function, which is a big no-no.
+Just read it once and cache it.
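+
+In condensed form, the resulting pattern looks roughly like this (an
+illustrative sketch distilled from the hunks below, not a literal
+excerpt from bugs.c):
+
+  /* Boot-time cache of MSR_IA32_ARCH_CAPABILITIES, written once at init: */
+  static u64 __ro_after_init ia32_cap;
+
+  void __init cpu_select_mitigations(void)
+  {
+          /* One RDMSR for everybody, instead of one per mitigation handler: */
+          ia32_cap = x86_read_arch_cap_msr();
+
+          /* ... the individual *_select_mitigation() calls follow ... */
+  }
+
+  static void __init taa_select_mitigation(void)
+  {
+          /* Consumers use the cached value; no MSR access on this path: */
+          if ((ia32_cap & ARCH_CAP_MDS_NO) &&
+              !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+                  taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+  }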
+
+Fixes: ec9404e40e8f ("x86/bhi: Add BHI mitigation knob")
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 22 +++++++---------------
+ 1 file changed, 7 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -60,6 +60,8 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current)
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
++static u64 __ro_after_init ia32_cap;
++
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+
+ void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+@@ -143,6 +145,8 @@ void __init cpu_select_mitigations(void)
+ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ }
+
++ ia32_cap = x86_read_arch_cap_msr();
++
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
+ spectre_v2_select_mitigation();
+@@ -307,8 +311,6 @@ static const char * const taa_strings[]
+
+ static void __init taa_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+@@ -347,7 +349,6 @@ static void __init taa_select_mitigation
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+- ia32_cap = x86_read_arch_cap_msr();
+ if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+ !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+@@ -407,8 +408,6 @@ static const char * const mmio_strings[]
+
+ static void __init mmio_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+ cpu_mitigations_off()) {
+@@ -419,8 +418,6 @@ static void __init mmio_select_mitigatio
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return;
+
+- ia32_cap = x86_read_arch_cap_msr();
+-
+ /*
+ * Enable CPU buffer clear mitigation for host and VMM, if also affected
+ * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+@@ -514,7 +511,7 @@ static void __init rfds_select_mitigatio
+ if (rfds_mitigation == RFDS_MITIGATION_OFF)
+ return;
+
+- if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
++ if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ else
+ rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -658,8 +655,6 @@ void update_srbds_msr(void)
+
+ static void __init srbds_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+@@ -668,7 +663,6 @@ static void __init srbds_select_mitigati
+ * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+ * by Processor MMIO Stale Data vulnerability.
+ */
+- ia32_cap = x86_read_arch_cap_msr();
+ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+@@ -812,7 +806,7 @@ static void __init gds_select_mitigation
+ /* Will verify below that mitigation _can_ be disabled */
+
+ /* No microcode */
+- if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++ if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
+ if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ /*
+ * This only needs to be done on the boot CPU so do it
+@@ -1884,8 +1878,6 @@ static void update_indir_branch_cond(voi
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+- u64 ia32_cap = x86_read_arch_cap_msr();
+-
+ /*
+ * Enable the idle clearing if SMT is active on CPUs which are
+ * affected only by MSBDS and not any other MDS variant.
+@@ -2797,7 +2789,7 @@ static const char *spectre_bhi_state(voi
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ return "; BHI: SW loop, KVM: SW loop";
+ else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+- !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
++ !(ia32_cap & ARCH_CAP_RRSBA))
+ return "; BHI: Retpoline";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ return "; BHI: Syscall hardening, KVM: SW loop";
--- /dev/null
+From dfe648903f42296866d79f10d03f8c85c9dfba30 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+Date: Wed, 10 Apr 2024 22:40:45 -0700
+Subject: x86/bugs: Fix BHI documentation
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+commit dfe648903f42296866d79f10d03f8c85c9dfba30 upstream.
+
+Fix up some inaccuracies in the BHI documentation.
+
+Fixes: ec9404e40e8f ("x86/bhi: Add BHI mitigation knob")
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/8c84f7451bfe0dd08543c6082a383f390d4aa7e2.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/spectre.rst | 15 ++++++++-------
+ Documentation/admin-guide/kernel-parameters.txt | 12 +++++++-----
+ 2 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -439,11 +439,11 @@ The possible values in this file are:
+ - System is protected by retpoline
+ * - BHI: BHI_DIS_S
+ - System is protected by BHI_DIS_S
+- * - BHI: SW loop; KVM SW loop
++ * - BHI: SW loop, KVM SW loop
+ - System is protected by software clearing sequence
+ * - BHI: Syscall hardening
+ - Syscalls are hardened against BHI
+- * - BHI: Syscall hardening; KVM: SW loop
++ * - BHI: Syscall hardening, KVM: SW loop
+ - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
+
+ Full mitigation might require a microcode update from the CPU
+@@ -716,13 +716,14 @@ For user space mitigation:
+ of the HW BHI control and the SW BHB clearing sequence.
+
+ on
+- unconditionally enable.
++ (default) Enable the HW or SW mitigation as
++ needed.
+ off
+- unconditionally disable.
++ Disable the mitigation.
+ auto
+- enable if hardware mitigation
+- control(BHI_DIS_S) is available, otherwise
+- enable alternate mitigation in KVM.
++ Enable the HW mitigation if needed, but
++ *don't* enable the SW mitigation except for KVM.
++ The system may be vulnerable.
+
+ For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3093,6 +3093,7 @@
+ reg_file_data_sampling=off [X86]
+ retbleed=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
++ spectre_bhi=off [X86]
+ spectre_v2_user=off [X86]
+ ssbd=force-off [ARM64]
+ tsx_async_abort=off [X86]
+@@ -5410,11 +5411,12 @@
+ deployment of the HW BHI control and the SW BHB
+ clearing sequence.
+
+- on - unconditionally enable.
+- off - unconditionally disable.
+- auto - (default) enable hardware mitigation
+- (BHI_DIS_S) if available, otherwise enable
+- alternate mitigation in KVM.
++ on - (default) Enable the HW or SW mitigation
++ as needed.
++ off - Disable the mitigation.
++ auto - Enable the HW mitigation if needed, but
++ *don't* enable the SW mitigation except
++ for KVM. The system may be vulnerable.
+
+ spectre_v2= [X86] Control mitigation of Spectre variant 2
+ (indirect branch speculation) vulnerability.
--- /dev/null
+From d0485730d2189ffe5d986d4e9e191f1e4d5ffd24 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Thu, 11 Apr 2024 09:25:36 +0200
+Subject: x86/bugs: Rename various 'ia32_cap' variables to 'x86_arch_cap_msr'
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit d0485730d2189ffe5d986d4e9e191f1e4d5ffd24 upstream.
+
+We are using the 'ia32_cap' value in a number of places; it got its
+name from the MSR_IA32_ARCH_CAPABILITIES MSR register.
+
+But there's very little 'IA32' about it - this isn't 32-bit-only
+code, nor does the value originate from there; it's just a historic
+quirk that many Intel MSR names are prefixed with IA32_.
+
+This is already clear from the helper function that wraps the MSR,
+x86_read_arch_cap_msr(), which doesn't have the IA32 prefix.
+
+So rename 'ia32_cap' to 'x86_arch_cap_msr' to be consistent with
+its role and with the naming of the helper function.
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Nikolay Borisov <nik.borisov@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 30 +++++++++++++-------------
+ arch/x86/kernel/cpu/common.c | 48 +++++++++++++++++++++----------------------
+ 2 files changed, 39 insertions(+), 39 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -60,7 +60,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current)
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
+-static u64 __ro_after_init ia32_cap;
++static u64 __ro_after_init x86_arch_cap_msr;
+
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+
+@@ -145,7 +145,7 @@ void __init cpu_select_mitigations(void)
+ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ }
+
+- ia32_cap = x86_read_arch_cap_msr();
++ x86_arch_cap_msr = x86_read_arch_cap_msr();
+
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
+@@ -349,8 +349,8 @@ static void __init taa_select_mitigation
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+- if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+- !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
++ if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
++ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+@@ -440,7 +440,7 @@ static void __init mmio_select_mitigatio
+ * be propagated to uncore buffers, clearing the Fill buffers on idle
+ * is required irrespective of SMT state.
+ */
+- if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
++ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+ static_branch_enable(&mds_idle_clear);
+
+ /*
+@@ -450,10 +450,10 @@ static void __init mmio_select_mitigatio
+ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ * affected systems.
+ */
+- if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
++ if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+- !(ia32_cap & ARCH_CAP_MDS_NO)))
++ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+ else
+ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+@@ -511,7 +511,7 @@ static void __init rfds_select_mitigatio
+ if (rfds_mitigation == RFDS_MITIGATION_OFF)
+ return;
+
+- if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ else
+ rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -663,7 +663,7 @@ static void __init srbds_select_mitigati
+ * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+ * by Processor MMIO Stale Data vulnerability.
+ */
+- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
++ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+@@ -806,7 +806,7 @@ static void __init gds_select_mitigation
+ /* Will verify below that mitigation _can_ be disabled */
+
+ /* No microcode */
+- if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
++ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
+ if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ /*
+ * This only needs to be done on the boot CPU so do it
+@@ -1518,14 +1518,14 @@ static enum spectre_v2_mitigation __init
+ /* Disable in-kernel use of non-RSB RET predictors */
+ static void __init spec_ctrl_disable_kernel_rrsba(void)
+ {
+- u64 ia32_cap;
++ u64 x86_arch_cap_msr;
+
+ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ return;
+
+- ia32_cap = x86_read_arch_cap_msr();
++ x86_arch_cap_msr = x86_read_arch_cap_msr();
+
+- if (ia32_cap & ARCH_CAP_RRSBA) {
++ if (x86_arch_cap_msr & ARCH_CAP_RRSBA) {
+ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ }
+@@ -1892,7 +1892,7 @@ static void update_mds_branch_idle(void)
+ if (sched_smt_active()) {
+ static_branch_enable(&mds_idle_clear);
+ } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+- (ia32_cap & ARCH_CAP_FBSDP_NO)) {
++ (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+ static_branch_disable(&mds_idle_clear);
+ }
+ }
+@@ -2789,7 +2789,7 @@ static const char *spectre_bhi_state(voi
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ return "; BHI: SW loop, KVM: SW loop";
+ else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+- !(ia32_cap & ARCH_CAP_RRSBA))
++ !(x86_arch_cap_msr & ARCH_CAP_RRSBA))
+ return "; BHI: Retpoline";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ return "; BHI: Syscall hardening, KVM: SW loop";
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1198,25 +1198,25 @@ static bool __init cpu_matches(const str
+
+ u64 x86_read_arch_cap_msr(void)
+ {
+- u64 ia32_cap = 0;
++ u64 x86_arch_cap_msr = 0;
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+
+- return ia32_cap;
++ return x86_arch_cap_msr;
+ }
+
+-static bool arch_cap_mmio_immune(u64 ia32_cap)
++static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
+ {
+- return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+- ia32_cap & ARCH_CAP_PSDP_NO &&
+- ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
++ return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
++ x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
++ x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
+ }
+
+-static bool __init vulnerable_to_rfds(u64 ia32_cap)
++static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
+ {
+ /* The "immunity" bit trumps everything else: */
+- if (ia32_cap & ARCH_CAP_RFDS_NO)
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
+ return false;
+
+ /*
+@@ -1224,7 +1224,7 @@ static bool __init vulnerable_to_rfds(u6
+ * indicate that mitigation is needed because guest is running on a
+ * vulnerable hardware or may migrate to such hardware:
+ */
+- if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ return true;
+
+ /* Only consult the blacklist when there is no enumeration: */
+@@ -1233,11 +1233,11 @@ static bool __init vulnerable_to_rfds(u6
+
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+- u64 ia32_cap = x86_read_arch_cap_msr();
++ u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+
+ /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+- !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
++ !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
+ setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
+@@ -1249,7 +1249,7 @@ static void __init cpu_set_bug_bits(stru
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+- !(ia32_cap & ARCH_CAP_SSB_NO) &&
++ !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+@@ -1260,17 +1260,17 @@ static void __init cpu_set_bug_bits(stru
+ * Don't use AutoIBRS when SNP is enabled because it degrades host
+ * userspace indirect branch performance.
+ */
+- if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
++ if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
+ (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
+ !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+- !(ia32_cap & ARCH_CAP_PBRSB_NO))
++ !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ }
+
+ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+- !(ia32_cap & ARCH_CAP_MDS_NO)) {
++ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
+ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+@@ -1289,9 +1289,9 @@ static void __init cpu_set_bug_bits(stru
+ * TSX_CTRL check alone is not sufficient for cases when the microcode
+ * update is not present or running as guest that don't get TSX_CTRL.
+ */
+- if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
++ if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
+ (cpu_has(c, X86_FEATURE_RTM) ||
+- (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
++ (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
+ setup_force_cpu_bug(X86_BUG_TAA);
+
+ /*
+@@ -1317,7 +1317,7 @@ static void __init cpu_set_bug_bits(stru
+ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ */
+- if (!arch_cap_mmio_immune(ia32_cap)) {
++ if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
+ if (cpu_matches(cpu_vuln_blacklist, MMIO))
+ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+@@ -1325,7 +1325,7 @@ static void __init cpu_set_bug_bits(stru
+ }
+
+ if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+- if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
++ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
+ setup_force_cpu_bug(X86_BUG_RETBLEED);
+ }
+
+@@ -1338,7 +1338,7 @@ static void __init cpu_set_bug_bits(stru
+ * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+ * which means that AVX will be disabled.
+ */
+- if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
+ boot_cpu_has(X86_FEATURE_AVX))
+ setup_force_cpu_bug(X86_BUG_GDS);
+
+@@ -1347,11 +1347,11 @@ static void __init cpu_set_bug_bits(stru
+ setup_force_cpu_bug(X86_BUG_SRSO);
+ }
+
+- if (vulnerable_to_rfds(ia32_cap))
++ if (vulnerable_to_rfds(x86_arch_cap_msr))
+ setup_force_cpu_bug(X86_BUG_RFDS);
+
+ /* When virtualized, eIBRS could be hidden, assume vulnerable */
+- if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
++ if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+ !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+@@ -1361,7 +1361,7 @@ static void __init cpu_set_bug_bits(stru
+ return;
+
+ /* Rogue Data Cache Load? No! */
+- if (ia32_cap & ARCH_CAP_RDCL_NO)
++ if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
+ return;
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);