4.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 27 Mar 2024 10:38:23 +0000 (11:38 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 27 Mar 2024 10:38:23 +0000 (11:38 +0100)
added patches:
documentation-hw-vuln-update-spectre-doc.patch
x86-bugs-use-sysfs_emit.patch
x86-cpu-support-amd-automatic-ibrs.patch

queue-4.19/documentation-hw-vuln-update-spectre-doc.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/x86-bugs-use-sysfs_emit.patch [new file with mode: 0644]
queue-4.19/x86-cpu-support-amd-automatic-ibrs.patch [new file with mode: 0644]

diff --git a/queue-4.19/documentation-hw-vuln-update-spectre-doc.patch b/queue-4.19/documentation-hw-vuln-update-spectre-doc.patch
new file mode 100644 (file)
index 0000000..d71af62
--- /dev/null
@@ -0,0 +1,33 @@
+From 06cb31cc761823ef444ba4e1df11347342a6e745 Mon Sep 17 00:00:00 2001
+From: Lin Yujun <linyujun809@huawei.com>
+Date: Tue, 30 Aug 2022 20:36:14 +0800
+Subject: Documentation/hw-vuln: Update spectre doc
+
+From: Lin Yujun <linyujun809@huawei.com>
+
+commit 06cb31cc761823ef444ba4e1df11347342a6e745 upstream.
+
+commit 7c693f54c873691 ("x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS")
+
+adds the "ibrs " option  in
+Documentation/admin-guide/kernel-parameters.txt but omits it to
+Documentation/admin-guide/hw-vuln/spectre.rst, add it.
+
+Signed-off-by: Lin Yujun <linyujun809@huawei.com>
+Link: https://lore.kernel.org/r/20220830123614.23007-1-linyujun809@huawei.com
+Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/spectre.rst |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -625,6 +625,7 @@ kernel command line.
+                 eibrs                   enhanced IBRS
+                 eibrs,retpoline         enhanced IBRS + Retpolines
+                 eibrs,lfence            enhanced IBRS + LFENCE
++                ibrs                    use IBRS to protect kernel
+               Not specifying this option is equivalent to
+               spectre_v2=auto.
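
The hunk above adds "ibrs" to the table of spectre_v2= values. For context, the kernel consumes these strings via a table-driven lookup (the mitigation_options[] array in arch/x86/kernel/cpu/bugs.c, visible in the third patch below). The following is a minimal, hypothetical userspace sketch of that matching style — the names, the reduced option set, and the fallback behavior are illustrative assumptions, not the kernel's actual parser:

#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified mirror of the option table consulted when
 * parsing spectre_v2= -- illustrative only, not the kernel's code. */
enum spectre_v2_cmd { CMD_AUTO, CMD_OFF, CMD_RETPOLINE, CMD_EIBRS, CMD_IBRS };

static const struct {
	const char *option;
	enum spectre_v2_cmd cmd;
} mitigation_options[] = {
	{ "auto",      CMD_AUTO      },
	{ "off",       CMD_OFF       },
	{ "retpoline", CMD_RETPOLINE },
	{ "eibrs",     CMD_EIBRS     },
	{ "ibrs",      CMD_IBRS      },	/* the value documented by this patch */
};

static enum spectre_v2_cmd parse_spectre_v2(const char *arg)
{
	size_t i;

	for (i = 0; i < sizeof(mitigation_options) / sizeof(mitigation_options[0]); i++)
		if (!strcmp(arg, mitigation_options[i].option))
			return mitigation_options[i].cmd;
	return CMD_AUTO;	/* assumption: unknown values fall back to auto */
}

int main(void)
{
	printf("spectre_v2=ibrs -> cmd %d\n", parse_spectre_v2("ibrs"));
	return 0;
}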
diff --git a/queue-4.19/series b/queue-4.19/series
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..aca95c98c5559abcade5dc539a1c5ce5e4777ce7 100644 (file)
--- a/queue-4.19/series
@@ -0,0 +1,3 @@
+documentation-hw-vuln-update-spectre-doc.patch
+x86-cpu-support-amd-automatic-ibrs.patch
+x86-bugs-use-sysfs_emit.patch
diff --git a/queue-4.19/x86-bugs-use-sysfs_emit.patch b/queue-4.19/x86-bugs-use-sysfs_emit.patch
new file mode 100644 (file)
index 0000000..704ac70
--- /dev/null
@@ -0,0 +1,201 @@
+From 1d30800c0c0ae1d086ffad2bdf0ba4403370f132 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 9 Aug 2022 17:32:02 +0200
+Subject: x86/bugs: Use sysfs_emit()
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 1d30800c0c0ae1d086ffad2bdf0ba4403370f132 upstream.
+
+Those mitigations are very talkative; use the printing helper which pays
+attention to the buffer size.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lore.kernel.org/r/20220809153419.10182-1-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |   82 ++++++++++++++++++++++-----------------------
+ 1 file changed, 41 insertions(+), 41 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2137,69 +2137,69 @@ static const char * const l1tf_vmx_state
+ static ssize_t l1tf_show_state(char *buf)
+ {
+       if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+-              return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++              return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
+       if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
+           (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
+            sched_smt_active())) {
+-              return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+-                             l1tf_vmx_states[l1tf_vmx_mitigation]);
++              return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
++                                l1tf_vmx_states[l1tf_vmx_mitigation]);
+       }
+-      return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+-                     l1tf_vmx_states[l1tf_vmx_mitigation],
+-                     sched_smt_active() ? "vulnerable" : "disabled");
++      return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
++                        l1tf_vmx_states[l1tf_vmx_mitigation],
++                        sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ static ssize_t itlb_multihit_show_state(char *buf)
+ {
+       if (itlb_multihit_kvm_mitigation)
+-              return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
++              return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
+       else
+-              return sprintf(buf, "KVM: Vulnerable\n");
++              return sysfs_emit(buf, "KVM: Vulnerable\n");
+ }
+ #else
+ static ssize_t l1tf_show_state(char *buf)
+ {
+-      return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++      return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
+ }
+ static ssize_t itlb_multihit_show_state(char *buf)
+ {
+-      return sprintf(buf, "Processor vulnerable\n");
++      return sysfs_emit(buf, "Processor vulnerable\n");
+ }
+ #endif
+ static ssize_t mds_show_state(char *buf)
+ {
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+-              return sprintf(buf, "%s; SMT Host state unknown\n",
+-                             mds_strings[mds_mitigation]);
++              return sysfs_emit(buf, "%s; SMT Host state unknown\n",
++                                mds_strings[mds_mitigation]);
+       }
+       if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+-              return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+-                             (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+-                              sched_smt_active() ? "mitigated" : "disabled"));
++              return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++                                (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
++                                 sched_smt_active() ? "mitigated" : "disabled"));
+       }
+-      return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+-                     sched_smt_active() ? "vulnerable" : "disabled");
++      return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++                        sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ static ssize_t tsx_async_abort_show_state(char *buf)
+ {
+       if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+           (taa_mitigation == TAA_MITIGATION_OFF))
+-              return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
++              return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+-              return sprintf(buf, "%s; SMT Host state unknown\n",
+-                             taa_strings[taa_mitigation]);
++              return sysfs_emit(buf, "%s; SMT Host state unknown\n",
++                                taa_strings[taa_mitigation]);
+       }
+-      return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+-                     sched_smt_active() ? "vulnerable" : "disabled");
++      return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
++                        sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ static ssize_t mmio_stale_data_show_state(char *buf)
+@@ -2267,33 +2267,33 @@ static char *pbrsb_eibrs_state(void)
+ static ssize_t spectre_v2_show_state(char *buf)
+ {
+       if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+-              return sprintf(buf, "Vulnerable: LFENCE\n");
++              return sysfs_emit(buf, "Vulnerable: LFENCE\n");
+       if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+-              return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
++              return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+-              return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
++              return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+-      return sprintf(buf, "%s%s%s%s%s%s%s\n",
+-                     spectre_v2_strings[spectre_v2_enabled],
+-                     ibpb_state(),
+-                     boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+-                     stibp_state(),
+-                     boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+-                     pbrsb_eibrs_state(),
+-                     spectre_v2_module_string());
++      return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
++                        spectre_v2_strings[spectre_v2_enabled],
++                        ibpb_state(),
++                        boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++                        stibp_state(),
++                        boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++                        pbrsb_eibrs_state(),
++                        spectre_v2_module_string());
+ }
+ static ssize_t srbds_show_state(char *buf)
+ {
+-      return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
++      return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
+ }
+ static ssize_t retbleed_show_state(char *buf)
+ {
+-      return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
++      return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+ }
+ static ssize_t gds_show_state(char *buf)
+@@ -2305,26 +2305,26 @@ static ssize_t cpu_show_common(struct de
+                              char *buf, unsigned int bug)
+ {
+       if (!boot_cpu_has_bug(bug))
+-              return sprintf(buf, "Not affected\n");
++              return sysfs_emit(buf, "Not affected\n");
+       switch (bug) {
+       case X86_BUG_CPU_MELTDOWN:
+               if (boot_cpu_has(X86_FEATURE_PTI))
+-                      return sprintf(buf, "Mitigation: PTI\n");
++                      return sysfs_emit(buf, "Mitigation: PTI\n");
+               if (hypervisor_is_type(X86_HYPER_XEN_PV))
+-                      return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
++                      return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+               break;
+       case X86_BUG_SPECTRE_V1:
+-              return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
++              return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+       case X86_BUG_SPECTRE_V2:
+               return spectre_v2_show_state(buf);
+       case X86_BUG_SPEC_STORE_BYPASS:
+-              return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
++              return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
+       case X86_BUG_L1TF:
+               if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+@@ -2357,7 +2357,7 @@ static ssize_t cpu_show_common(struct de
+               break;
+       }
+-      return sprintf(buf, "Vulnerable\n");
++      return sysfs_emit(buf, "Vulnerable\n");
+ }
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
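
Why the mechanical churn above matters: sprintf() writes into the sysfs buffer with no length limit at all, while sysfs_emit() exploits the sysfs contract that show() callbacks always receive a page-aligned buffer of PAGE_SIZE bytes, and it truncates rather than overruns. Below is a minimal userspace sketch of that behavior, assuming 4 KiB pages; sysfs_emit_sketch() is an illustrative stand-in, since the real helper is kernel-internal:

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumption: 4 KiB pages, as on typical x86 configs */

/* Sketch of sysfs_emit() semantics: refuse a buffer that is not the
 * start of a page (sysfs always hands show() one) and never format
 * more than PAGE_SIZE bytes, unlike bare sprintf(). */
static int sysfs_emit_sketch(char *buf, const char *fmt, ...)
{
	va_list args;
	int len;

	if (!buf || ((uintptr_t)buf & (PAGE_SIZE - 1)))
		return 0;	/* invalid buffer: emit nothing */

	va_start(args, fmt);
	len = vsnprintf(buf, PAGE_SIZE, fmt, args);
	va_end(args);

	/* vsnprintf() reports the would-be length; clamp to what fit */
	return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
}

int main(void)
{
	static char page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
	int n;

	n = sysfs_emit_sketch(page, "%s; SMT %s\n", "Mitigation: PTI", "disabled");
	printf("%d bytes: %s", n, page);
	return 0;
}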
diff --git a/queue-4.19/x86-cpu-support-amd-automatic-ibrs.patch b/queue-4.19/x86-cpu-support-amd-automatic-ibrs.patch
new file mode 100644 (file)
index 0000000..61fb899
--- /dev/null
@@ -0,0 +1,192 @@
+From e7862eda309ecfccc36bb5558d937ed3ace07f3f Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Tue, 24 Jan 2023 10:33:18 -0600
+Subject: x86/cpu: Support AMD Automatic IBRS
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit e7862eda309ecfccc36bb5558d937ed3ace07f3f upstream.
+
+The AMD Zen4 core supports a new feature called Automatic IBRS.
+
+It is a "set-and-forget" feature that means that, like Intel's Enhanced IBRS,
+h/w manages its IBRS mitigation resources automatically across CPL transitions.
+
+The feature is advertised by CPUID_Fn80000021_EAX bit 8 and is enabled by
+setting MSR C000_0080 (EFER) bit 21.
+
+Enable Automatic IBRS by default if the CPU feature is present. It typically
+provides better performance than the incumbent generic retpoline mitigation.
+
+Reuse the SPECTRE_V2_EIBRS spectre_v2_mitigation enum.  AMD Automatic IBRS and
+Intel Enhanced IBRS have similar enablement.  Add NO_EIBRS_PBRSB to
+cpu_vuln_whitelist, since AMD Automatic IBRS isn't affected by PBRSB-eIBRS.
+
+The kernel command line option spectre_v2=eibrs is used to select AMD Automatic
+IBRS, if available.
+
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Sean Christopherson <seanjc@google.com>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/r/20230124163319.2277355-8-kim.phillips@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/spectre.rst   |    6 +++---
+ Documentation/admin-guide/kernel-parameters.txt |    6 +++---
+ arch/x86/include/asm/cpufeatures.h              |    2 ++
+ arch/x86/include/asm/msr-index.h                |    2 ++
+ arch/x86/kernel/cpu/bugs.c                      |   20 ++++++++++++--------
+ arch/x86/kernel/cpu/common.c                    |   17 ++++++++++-------
+ 6 files changed, 32 insertions(+), 21 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -622,9 +622,9 @@ kernel command line.
+                 retpoline,generic       Retpolines
+                 retpoline,lfence        LFENCE; indirect branch
+                 retpoline,amd           alias for retpoline,lfence
+-                eibrs                   enhanced IBRS
+-                eibrs,retpoline         enhanced IBRS + Retpolines
+-                eibrs,lfence            enhanced IBRS + LFENCE
++                eibrs                   Enhanced/Auto IBRS
++                eibrs,retpoline         Enhanced/Auto IBRS + Retpolines
++                eibrs,lfence            Enhanced/Auto IBRS + LFENCE
+                 ibrs                    use IBRS to protect kernel
+               Not specifying this option is equivalent to
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4403,9 +4403,9 @@
+                       retpoline,generic - Retpolines
+                       retpoline,lfence  - LFENCE; indirect branch
+                       retpoline,amd     - alias for retpoline,lfence
+-                      eibrs             - enhanced IBRS
+-                      eibrs,retpoline   - enhanced IBRS + Retpolines
+-                      eibrs,lfence      - enhanced IBRS + LFENCE
++                      eibrs             - Enhanced/Auto IBRS
++                      eibrs,retpoline   - Enhanced/Auto IBRS + Retpolines
++                      eibrs,lfence      - Enhanced/Auto IBRS + LFENCE
+                       ibrs              - use IBRS to protect kernel
+                       Not specifying this option is equivalent to
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -369,6 +369,8 @@
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+ #define X86_FEATURE_SPEC_CTRL_SSBD    (18*32+31) /* "" Speculative Store Bypass Disable */
++#define X86_FEATURE_AUTOIBRS          (20*32+ 8) /* "" Automatic IBRS */
++
+ /*
+  * BUG word(s)
+  */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -30,6 +30,7 @@
+ #define _EFER_SVME            12 /* Enable virtualization */
+ #define _EFER_LMSLE           13 /* Long Mode Segment Limit Enable */
+ #define _EFER_FFXSR           14 /* Enable Fast FXSAVE/FXRSTOR */
++#define _EFER_AUTOIBRS                21 /* Enable Automatic IBRS */
+ #define EFER_SCE              (1<<_EFER_SCE)
+ #define EFER_LME              (1<<_EFER_LME)
+@@ -38,6 +39,7 @@
+ #define EFER_SVME             (1<<_EFER_SVME)
+ #define EFER_LMSLE            (1<<_EFER_LMSLE)
+ #define EFER_FFXSR            (1<<_EFER_FFXSR)
++#define EFER_AUTOIBRS         (1<<_EFER_AUTOIBRS)
+ /* Intel MSRs. Some also available on other CPUs */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1187,9 +1187,9 @@ static const char * const spectre_v2_str
+       [SPECTRE_V2_NONE]                       = "Vulnerable",
+       [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
+       [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
+-      [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
+-      [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
+-      [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
++      [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced / Automatic IBRS",
++      [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
++      [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
+       [SPECTRE_V2_IBRS]                       = "Mitigation: IBRS",
+ };
+@@ -1258,7 +1258,7 @@ static enum spectre_v2_mitigation_cmd __
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+           !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+-              pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
++              pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+@@ -1437,8 +1437,12 @@ static void __init spectre_v2_select_mit
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+       if (spectre_v2_in_ibrs_mode(mode)) {
+-              x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+-              update_spec_ctrl(x86_spec_ctrl_base);
++              if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
++                      msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
++              } else {
++                      x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
++                      update_spec_ctrl(x86_spec_ctrl_base);
++              }
+       }
+       switch (mode) {
+@@ -1522,8 +1526,8 @@ static void __init spectre_v2_select_mit
+       /*
+        * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+        * and Enhanced IBRS protect firmware too, so enable IBRS around
+-       * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+-       * enabled.
++       * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
++       * otherwise enabled.
+        *
+        * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+        * the user might select retpoline on the kernel command line and if
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1025,7 +1025,7 @@ static const __initconst struct x86_cpu_
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+-      VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++      VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+       {}
+ };
+@@ -1133,8 +1133,16 @@ static void __init cpu_set_bug_bits(stru
+          !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+-      if (ia32_cap & ARCH_CAP_IBRS_ALL)
++      /*
++       * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
++       * flag and protect from vendor-specific bugs via the whitelist.
++       */
++      if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
++              if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
++                  !(ia32_cap & ARCH_CAP_PBRSB_NO))
++                      setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
++      }
+       if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+           !(ia32_cap & ARCH_CAP_MDS_NO)) {
+@@ -1196,11 +1204,6 @@ static void __init cpu_set_bug_bits(stru
+                       setup_force_cpu_bug(X86_BUG_RETBLEED);
+       }
+-      if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+-          !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+-          !(ia32_cap & ARCH_CAP_PBRSB_NO))
+-              setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+-
+       /*
+        * Check if CPU is vulnerable to GDS. If running in a virtual machine on
+        * an affected processor, the VMM may have disabled the use of GATHER by
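
Tying the pieces of the patch above together: detection is CPUID_Fn80000021_EAX bit 8 (the new X86_FEATURE_AUTOIBRS define) and enablement is EFER bit 21 (the new _EFER_AUTOIBRS define). A hedged userspace sketch of the detection half only, using GCC's cpuid.h; the EFER MSR write requires ring 0, so it appears solely as a comment mirroring the msr_set_bit() call in the bugs.c hunk above:

#include <cpuid.h>
#include <stdio.h>

#define AUTOIBRS_LEAF 0x80000021u
#define AUTOIBRS_BIT  (1u << 8)	/* CPUID_Fn80000021_EAX[8], per the patch text */

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* __get_cpuid_count() returns 0 if the leaf is out of range */
	if (!__get_cpuid_count(AUTOIBRS_LEAF, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000021 not supported");
		return 1;
	}

	if (eax & AUTOIBRS_BIT) {
		puts("Automatic IBRS advertised");
		/* Kernel-side enable (ring 0 only), as in the patch:
		 *   msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);  // EFER bit 21
		 */
	} else {
		puts("Automatic IBRS not advertised");
	}
	return 0;
}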