git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 27 Mar 2024 10:39:02 +0000 (11:39 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 27 Mar 2024 10:39:02 +0000 (11:39 +0100)
added patches:
documentation-hw-vuln-update-spectre-doc.patch
kvm-x86-advertise-cpuid.-eax-7-ecx-2-edx-to-userspace.patch
kvm-x86-update-kvm-only-leaf-handling-to-allow-for-100-kvm-only-leafs.patch
x86-bugs-use-sysfs_emit.patch
x86-cpu-support-amd-automatic-ibrs.patch

queue-5.15/documentation-hw-vuln-update-spectre-doc.patch [new file with mode: 0644]
queue-5.15/kvm-x86-advertise-cpuid.-eax-7-ecx-2-edx-to-userspace.patch [new file with mode: 0644]
queue-5.15/kvm-x86-update-kvm-only-leaf-handling-to-allow-for-100-kvm-only-leafs.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/x86-bugs-use-sysfs_emit.patch [new file with mode: 0644]
queue-5.15/x86-cpu-support-amd-automatic-ibrs.patch [new file with mode: 0644]

diff --git a/queue-5.15/documentation-hw-vuln-update-spectre-doc.patch b/queue-5.15/documentation-hw-vuln-update-spectre-doc.patch
new file mode 100644 (file)
index 0000000..d71af62
--- /dev/null
@@ -0,0 +1,33 @@
+From 06cb31cc761823ef444ba4e1df11347342a6e745 Mon Sep 17 00:00:00 2001
+From: Lin Yujun <linyujun809@huawei.com>
+Date: Tue, 30 Aug 2022 20:36:14 +0800
+Subject: Documentation/hw-vuln: Update spectre doc
+
+From: Lin Yujun <linyujun809@huawei.com>
+
+commit 06cb31cc761823ef444ba4e1df11347342a6e745 upstream.
+
+commit 7c693f54c873691 ("x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS")
+
+adds the "ibrs " option  in
+Documentation/admin-guide/kernel-parameters.txt but omits it to
+Documentation/admin-guide/hw-vuln/spectre.rst, add it.
+
+Signed-off-by: Lin Yujun <linyujun809@huawei.com>
+Link: https://lore.kernel.org/r/20220830123614.23007-1-linyujun809@huawei.com
+Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/spectre.rst |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -625,6 +625,7 @@ kernel command line.
+                 eibrs                   enhanced IBRS
+                 eibrs,retpoline         enhanced IBRS + Retpolines
+                 eibrs,lfence            enhanced IBRS + LFENCE
++                ibrs                    use IBRS to protect kernel
+               Not specifying this option is equivalent to
+               spectre_v2=auto.
diff --git a/queue-5.15/kvm-x86-advertise-cpuid.-eax-7-ecx-2-edx-to-userspace.patch b/queue-5.15/kvm-x86-advertise-cpuid.-eax-7-ecx-2-edx-to-userspace.patch
new file mode 100644 (file)
index 0000000..d09ea19
--- /dev/null
@@ -0,0 +1,127 @@
+From eefe5e6682099445f77f2d97d4c525f9ac9d9b07 Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Mon, 23 Oct 2023 17:16:35 -0700
+Subject: KVM: x86: Advertise CPUID.(EAX=7,ECX=2):EDX[5:0] to userspace
+
+From: Jim Mattson <jmattson@google.com>
+
+commit eefe5e6682099445f77f2d97d4c525f9ac9d9b07 upstream.
+
+The low five bits {INTEL_PSFD, IPRED_CTRL, RRSBA_CTRL, DDPD_U, BHI_CTRL}
+advertise the availability of specific bits in IA32_SPEC_CTRL. Since KVM
+dynamically determines the legal IA32_SPEC_CTRL bits for the underlying
+hardware, the hard work has already been done. Just let userspace know
+that a guest can use these IA32_SPEC_CTRL bits.
+
+The sixth bit (MCDT_NO) states that the processor does not exhibit MXCSR
+Configuration Dependent Timing (MCDT) behavior. This is an inherent
+property of the physical processor that is inherited by the virtual
+CPU. Pass that information on to userspace.
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Chao Gao <chao.gao@intel.com>
+Link: https://lore.kernel.org/r/20231024001636.890236-1-jmattson@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c         | 21 ++++++++++++++++++---
+ arch/x86/kvm/reverse_cpuid.h | 12 ++++++++++++
+ 2 files changed, 30 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index dda6fc4cfae8..1811a9ddfe1d 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -679,6 +679,11 @@ void kvm_set_cpu_caps(void)
+               F(AMX_COMPLEX)
+       );
++      kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
++              F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
++              F(BHI_CTRL) | F(MCDT_NO)
++      );
++
+       kvm_cpu_cap_mask(CPUID_D_1_EAX,
+               F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
+       );
+@@ -960,13 +965,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+               break;
+       /* function 7 has additional index. */
+       case 7:
+-              entry->eax = min(entry->eax, 1u);
++              max_idx = entry->eax = min(entry->eax, 2u);
+               cpuid_entry_override(entry, CPUID_7_0_EBX);
+               cpuid_entry_override(entry, CPUID_7_ECX);
+               cpuid_entry_override(entry, CPUID_7_EDX);
+-              /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
+-              if (entry->eax == 1) {
++              /* KVM only supports up to 0x7.2, capped above via min(). */
++              if (max_idx >= 1) {
+                       entry = do_host_cpuid(array, function, 1);
+                       if (!entry)
+                               goto out;
+@@ -976,6 +981,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+                       entry->ebx = 0;
+                       entry->ecx = 0;
+               }
++              if (max_idx >= 2) {
++                      entry = do_host_cpuid(array, function, 2);
++                      if (!entry)
++                              goto out;
++
++                      cpuid_entry_override(entry, CPUID_7_2_EDX);
++                      entry->ecx = 0;
++                      entry->ebx = 0;
++                      entry->eax = 0;
++              }
+               break;
+       case 0xa: { /* Architectural Performance Monitoring */
+               union cpuid10_eax eax;
+diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
+index b81650678375..17007016d8b5 100644
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -16,6 +16,7 @@ enum kvm_only_cpuid_leafs {
+       CPUID_7_1_EDX,
+       CPUID_8000_0007_EDX,
+       CPUID_8000_0022_EAX,
++      CPUID_7_2_EDX,
+       NR_KVM_CPU_CAPS,
+       NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+@@ -46,6 +47,14 @@ enum kvm_only_cpuid_leafs {
+ #define X86_FEATURE_AMX_COMPLEX         KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
+ #define X86_FEATURE_PREFETCHITI         KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
++/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
++#define X86_FEATURE_INTEL_PSFD                KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
++#define X86_FEATURE_IPRED_CTRL                KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
++#define KVM_X86_FEATURE_RRSBA_CTRL    KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
++#define X86_FEATURE_DDPD_U            KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
++#define X86_FEATURE_BHI_CTRL          KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
++#define X86_FEATURE_MCDT_NO           KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
++
+ /* CPUID level 0x80000007 (EDX). */
+ #define KVM_X86_FEATURE_CONSTANT_TSC  KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
+@@ -80,6 +89,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
+       [CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
+       [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
+       [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
++      [CPUID_7_2_EDX]       = {         7, 2, CPUID_EDX},
+ };
+ /*
+@@ -116,6 +126,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
+               return KVM_X86_FEATURE_CONSTANT_TSC;
+       else if (x86_feature == X86_FEATURE_PERFMON_V2)
+               return KVM_X86_FEATURE_PERFMON_V2;
++      else if (x86_feature == X86_FEATURE_RRSBA_CTRL)
++              return KVM_X86_FEATURE_RRSBA_CTRL;
+       return x86_feature;
+ }
+-- 
+2.44.0
+
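
[ Editor's illustration; not part of the queued patch. Once this backport is applied, a VMM can see the new CPUID.(EAX=7,ECX=2):EDX entry through the existing KVM_GET_SUPPORTED_CPUID ioctl. A minimal userspace sketch follows; the entry count of 128 and the omitted calloc() error check are simplifying assumptions. ]

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDONLY);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	int nent = 128;	/* assumed large enough for all supported entries */
	struct kvm_cpuid2 *cpuid = calloc(1, sizeof(*cpuid) +
					  nent * sizeof(struct kvm_cpuid_entry2));
	cpuid->nent = nent;

	if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
		perror("KVM_GET_SUPPORTED_CPUID");
		return 1;
	}

	for (unsigned int i = 0; i < cpuid->nent; i++) {
		struct kvm_cpuid_entry2 *e = &cpuid->entries[i];

		/* Leaf 7, subleaf 2 only shows up with this series applied. */
		if (e->function == 7 && e->index == 2)
			printf("CPUID.(EAX=7,ECX=2):EDX = 0x%08x (MCDT_NO = %u)\n",
			       e->edx, (e->edx >> 5) & 1);
	}
	return 0;
}
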
diff --git a/queue-5.15/kvm-x86-update-kvm-only-leaf-handling-to-allow-for-100-kvm-only-leafs.patch b/queue-5.15/kvm-x86-update-kvm-only-leaf-handling-to-allow-for-100-kvm-only-leafs.patch
new file mode 100644 (file)
index 0000000..da87ac1
--- /dev/null
@@ -0,0 +1,94 @@
+From 047c7229906152fb85c23dc18fd25a00cd7cb4de Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 25 Nov 2022 20:58:39 +0800
+Subject: KVM: x86: Update KVM-only leaf handling to allow for 100% KVM-only leafs
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 047c7229906152fb85c23dc18fd25a00cd7cb4de upstream.
+
+Rename kvm_cpu_cap_init_scattered() to kvm_cpu_cap_init_kvm_defined() in
+anticipation of adding KVM-only CPUID leafs that aren't recognized by the
+kernel and thus not scattered, i.e. for leafs that are 100% KVM-defined.
+
+Adjust/add comments to kvm_only_cpuid_leafs and KVM_X86_FEATURE to
+document how to create new kvm_only_cpuid_leafs entries for scattered
+features as well as features that are entirely unknown to the kernel.
+
+No functional change intended.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20221125125845.1182922-3-jiaxi.chen@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c         |    8 ++++----
+ arch/x86/kvm/reverse_cpuid.h |   18 +++++++++++++++---
+ 2 files changed, 19 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -355,9 +355,9 @@ static __always_inline void __kvm_cpu_ca
+ }
+ static __always_inline
+-void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
++void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
+ {
+-      /* Use kvm_cpu_cap_mask for non-scattered leafs. */
++      /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
+       BUILD_BUG_ON(leaf < NCAPINTS);
+       kvm_cpu_caps[leaf] = mask;
+@@ -367,7 +367,7 @@ void kvm_cpu_cap_init_scattered(enum kvm
+ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
+ {
+-      /* Use kvm_cpu_cap_init_scattered for scattered leafs. */
++      /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
+       BUILD_BUG_ON(leaf >= NCAPINTS);
+       kvm_cpu_caps[leaf] &= mask;
+@@ -473,7 +473,7 @@ void kvm_set_cpu_caps(void)
+               F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
+       );
+-      kvm_cpu_cap_init_scattered(CPUID_12_EAX,
++      kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
+               SF(SGX1) | SF(SGX2)
+       );
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -7,9 +7,9 @@
+ #include <asm/cpufeatures.h>
+ /*
+- * Hardware-defined CPUID leafs that are scattered in the kernel, but need to
+- * be directly used by KVM.  Note, these word values conflict with the kernel's
+- * "bug" caps, but KVM doesn't use those.
++ * Hardware-defined CPUID leafs that are either scattered by the kernel or are
++ * unknown to the kernel, but need to be directly used by KVM.  Note, these
++ * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
+  */
+ enum kvm_only_cpuid_leafs {
+       CPUID_12_EAX     = NCAPINTS,
+@@ -18,6 +18,18 @@ enum kvm_only_cpuid_leafs {
+       NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+ };
++/*
++ * Define a KVM-only feature flag.
++ *
++ * For features that are scattered by cpufeatures.h, __feature_translate() also
++ * needs to be updated to translate the kernel-defined feature into the
++ * KVM-defined feature.
++ *
++ * For features that are 100% KVM-only, i.e. not defined by cpufeatures.h,
++ * forego the intermediate KVM_X86_FEATURE and directly define X86_FEATURE_* so
++ * that X86_FEATURE_* can be used in KVM.  No __feature_translate() handling is
++ * needed in this case.
++ */
+ #define KVM_X86_FEATURE(w, f)         ((w)*32 + (f))
+ /* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
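
[ Editor's note; not part of the queued patch. The KVM_X86_FEATURE(w, f) macro shown above packs a KVM-only leaf into the same word*32+bit encoding the kernel uses for its own X86_FEATURE_* values. A tiny standalone illustration follows; the NCAPINTS value of 21 is only an assumption for this tree, the real value comes from asm/cpufeatures.h. ]

#include <stdio.h>

/* Same packing as arch/x86/kvm/reverse_cpuid.h: feature = word * 32 + bit. */
#define KVM_X86_FEATURE(w, f)	((w) * 32 + (f))

int main(void)
{
	/* Assumed for illustration: CPUID_12_EAX is the first KVM-only word,
	 * placed right after the kernel's own NCAPINTS words. */
	enum { NCAPINTS = 21, CPUID_12_EAX = NCAPINTS };

	int sgx1 = KVM_X86_FEATURE(CPUID_12_EAX, 0);	/* SF(SGX1) */

	printf("SGX1 -> word %d, bit %d\n", sgx1 / 32, sgx1 % 32);
	return 0;
}
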
diff --git a/queue-5.15/series b/queue-5.15/series
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..1d6d7b86f16658f2729131395d08dfc6f7283a22 100644 (file)
--- a/queue-5.15/series
@@ -0,0 +1,5 @@
+documentation-hw-vuln-update-spectre-doc.patch
+x86-cpu-support-amd-automatic-ibrs.patch
+x86-bugs-use-sysfs_emit.patch
+kvm-x86-update-kvm-only-leaf-handling-to-allow-for-100-kvm-only-leafs.patch
+kvm-x86-advertise-cpuid.-eax-7-ecx-2-edx-to-userspace.patch
diff --git a/queue-5.15/x86-bugs-use-sysfs_emit.patch b/queue-5.15/x86-bugs-use-sysfs_emit.patch
new file mode 100644 (file)
index 0000000..635b9f4
--- /dev/null
@@ -0,0 +1,231 @@
+From 1d30800c0c0ae1d086ffad2bdf0ba4403370f132 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 9 Aug 2022 17:32:02 +0200
+Subject: x86/bugs: Use sysfs_emit()
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 1d30800c0c0ae1d086ffad2bdf0ba4403370f132 upstream.
+
+Those mitigations are very talkative; use the printing helper which pays
+attention to the buffer size.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lore.kernel.org/r/20220809153419.10182-1-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |  105 ++++++++++++++++++++++-----------------------
+ 1 file changed, 52 insertions(+), 53 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2506,74 +2506,74 @@ static const char * const l1tf_vmx_state
+ static ssize_t l1tf_show_state(char *buf)
+ {
+       if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+-              return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++              return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
+       if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
+           (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
+            sched_smt_active())) {
+-              return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+-                             l1tf_vmx_states[l1tf_vmx_mitigation]);
++              return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
++                                l1tf_vmx_states[l1tf_vmx_mitigation]);
+       }
+-      return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+-                     l1tf_vmx_states[l1tf_vmx_mitigation],
+-                     sched_smt_active() ? "vulnerable" : "disabled");
++      return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
++                        l1tf_vmx_states[l1tf_vmx_mitigation],
++                        sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ static ssize_t itlb_multihit_show_state(char *buf)
+ {
+       if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
+           !boot_cpu_has(X86_FEATURE_VMX))
+-              return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
++              return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
+       else if (!(cr4_read_shadow() & X86_CR4_VMXE))
+-              return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
++              return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
+       else if (itlb_multihit_kvm_mitigation)
+-              return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
++              return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
+       else
+-              return sprintf(buf, "KVM: Vulnerable\n");
++              return sysfs_emit(buf, "KVM: Vulnerable\n");
+ }
+ #else
+ static ssize_t l1tf_show_state(char *buf)
+ {
+-      return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++      return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
+ }
+ static ssize_t itlb_multihit_show_state(char *buf)
+ {
+-      return sprintf(buf, "Processor vulnerable\n");
++      return sysfs_emit(buf, "Processor vulnerable\n");
+ }
+ #endif
+ static ssize_t mds_show_state(char *buf)
+ {
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+-              return sprintf(buf, "%s; SMT Host state unknown\n",
+-                             mds_strings[mds_mitigation]);
++              return sysfs_emit(buf, "%s; SMT Host state unknown\n",
++                                mds_strings[mds_mitigation]);
+       }
+       if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+-              return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+-                             (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+-                              sched_smt_active() ? "mitigated" : "disabled"));
++              return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++                                (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
++                                 sched_smt_active() ? "mitigated" : "disabled"));
+       }
+-      return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+-                     sched_smt_active() ? "vulnerable" : "disabled");
++      return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++                        sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ static ssize_t tsx_async_abort_show_state(char *buf)
+ {
+       if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+           (taa_mitigation == TAA_MITIGATION_OFF))
+-              return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
++              return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+-              return sprintf(buf, "%s; SMT Host state unknown\n",
+-                             taa_strings[taa_mitigation]);
++              return sysfs_emit(buf, "%s; SMT Host state unknown\n",
++                                taa_strings[taa_mitigation]);
+       }
+-      return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+-                     sched_smt_active() ? "vulnerable" : "disabled");
++      return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
++                        sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ static ssize_t mmio_stale_data_show_state(char *buf)
+@@ -2641,47 +2641,46 @@ static char *pbrsb_eibrs_state(void)
+ static ssize_t spectre_v2_show_state(char *buf)
+ {
+       if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+-              return sprintf(buf, "Vulnerable: LFENCE\n");
++              return sysfs_emit(buf, "Vulnerable: LFENCE\n");
+       if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+-              return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
++              return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+-              return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
++              return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+-      return sprintf(buf, "%s%s%s%s%s%s%s\n",
+-                     spectre_v2_strings[spectre_v2_enabled],
+-                     ibpb_state(),
+-                     boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+-                     stibp_state(),
+-                     boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+-                     pbrsb_eibrs_state(),
+-                     spectre_v2_module_string());
++      return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
++                        spectre_v2_strings[spectre_v2_enabled],
++                        ibpb_state(),
++                        boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++                        stibp_state(),
++                        boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++                        pbrsb_eibrs_state(),
++                        spectre_v2_module_string());
+ }
+ static ssize_t srbds_show_state(char *buf)
+ {
+-      return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
++      return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
+ }
+ static ssize_t retbleed_show_state(char *buf)
+ {
+       if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+           retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+-          if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+-              boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+-                  return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
+-
+-          return sprintf(buf, "%s; SMT %s\n",
+-                         retbleed_strings[retbleed_mitigation],
+-                         !sched_smt_active() ? "disabled" :
+-                         spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-                         spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+-                         "enabled with STIBP protection" : "vulnerable");
++              if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
++                  boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
++                      return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
++
++              return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
++                                !sched_smt_active() ? "disabled" :
++                                spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++                                spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
++                                "enabled with STIBP protection" : "vulnerable");
+       }
+-      return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
++      return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+ }
+ static ssize_t gds_show_state(char *buf)
+@@ -2703,26 +2702,26 @@ static ssize_t cpu_show_common(struct de
+                              char *buf, unsigned int bug)
+ {
+       if (!boot_cpu_has_bug(bug))
+-              return sprintf(buf, "Not affected\n");
++              return sysfs_emit(buf, "Not affected\n");
+       switch (bug) {
+       case X86_BUG_CPU_MELTDOWN:
+               if (boot_cpu_has(X86_FEATURE_PTI))
+-                      return sprintf(buf, "Mitigation: PTI\n");
++                      return sysfs_emit(buf, "Mitigation: PTI\n");
+               if (hypervisor_is_type(X86_HYPER_XEN_PV))
+-                      return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
++                      return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+               break;
+       case X86_BUG_SPECTRE_V1:
+-              return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
++              return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+       case X86_BUG_SPECTRE_V2:
+               return spectre_v2_show_state(buf);
+       case X86_BUG_SPEC_STORE_BYPASS:
+-              return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
++              return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
+       case X86_BUG_L1TF:
+               if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+@@ -2758,7 +2757,7 @@ static ssize_t cpu_show_common(struct de
+               break;
+       }
+-      return sprintf(buf, "Vulnerable\n");
++      return sysfs_emit(buf, "Vulnerable\n");
+ }
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
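
[ Editor's sketch; not part of the queued patch. The conversion pattern in a hypothetical driver attribute: sysfs_emit() knows the sysfs buffer it is handed is a single page and bounds the write to PAGE_SIZE, which a raw sprintf() cannot do. The names here (example_mitigation_show and its attribute) are invented for illustration. ]

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_mitigation_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	/* Before: return sprintf(buf, "%s\n", "Mitigation: example");
	 * sprintf() trusts the buffer blindly; sysfs_emit() checks it is a
	 * page-aligned sysfs buffer and never writes past PAGE_SIZE. */
	return sysfs_emit(buf, "%s\n", "Mitigation: example");
}
static DEVICE_ATTR_RO(example_mitigation);
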
diff --git a/queue-5.15/x86-cpu-support-amd-automatic-ibrs.patch b/queue-5.15/x86-cpu-support-amd-automatic-ibrs.patch
new file mode 100644 (file)
index 0000000..244dfb7
--- /dev/null
@@ -0,0 +1,193 @@
+From e7862eda309ecfccc36bb5558d937ed3ace07f3f Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Tue, 24 Jan 2023 10:33:18 -0600
+Subject: x86/cpu: Support AMD Automatic IBRS
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit e7862eda309ecfccc36bb5558d937ed3ace07f3f upstream.
+
+The AMD Zen4 core supports a new feature called Automatic IBRS.
+
+It is a "set-and-forget" feature that means that, like Intel's Enhanced IBRS,
+h/w manages its IBRS mitigation resources automatically across CPL transitions.
+
+The feature is advertised by CPUID_Fn80000021_EAX bit 8 and is enabled by
+setting MSR C000_0080 (EFER) bit 21.
+
+Enable Automatic IBRS by default if the CPU feature is present.  It typically
+provides greater performance over the incumbent generic retpolines mitigation.
+
+Reuse the SPECTRE_V2_EIBRS spectre_v2_mitigation enum.  AMD Automatic IBRS and
+Intel Enhanced IBRS have similar enablement.  Add NO_EIBRS_PBRSB to
+cpu_vuln_whitelist, since AMD Automatic IBRS isn't affected by PBRSB-eIBRS.
+
+The kernel command line option spectre_v2=eibrs is used to select AMD Automatic
+IBRS, if available.
+
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Sean Christopherson <seanjc@google.com>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/r/20230124163319.2277355-8-kim.phillips@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/spectre.rst   |    6 +++---
+ Documentation/admin-guide/kernel-parameters.txt |    6 +++---
+ arch/x86/include/asm/cpufeatures.h              |    1 +
+ arch/x86/include/asm/msr-index.h                |    2 ++
+ arch/x86/kernel/cpu/bugs.c                      |   20 ++++++++++++--------
+ arch/x86/kernel/cpu/common.c                    |   19 +++++++++++--------
+ 6 files changed, 32 insertions(+), 22 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -622,9 +622,9 @@ kernel command line.
+                 retpoline,generic       Retpolines
+                 retpoline,lfence        LFENCE; indirect branch
+                 retpoline,amd           alias for retpoline,lfence
+-                eibrs                   enhanced IBRS
+-                eibrs,retpoline         enhanced IBRS + Retpolines
+-                eibrs,lfence            enhanced IBRS + LFENCE
++                eibrs                   Enhanced/Auto IBRS
++                eibrs,retpoline         Enhanced/Auto IBRS + Retpolines
++                eibrs,lfence            Enhanced/Auto IBRS + LFENCE
+                 ibrs                    use IBRS to protect kernel
+               Not specifying this option is equivalent to
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5413,9 +5413,9 @@
+                       retpoline,generic - Retpolines
+                       retpoline,lfence  - LFENCE; indirect branch
+                       retpoline,amd     - alias for retpoline,lfence
+-                      eibrs             - enhanced IBRS
+-                      eibrs,retpoline   - enhanced IBRS + Retpolines
+-                      eibrs,lfence      - enhanced IBRS + LFENCE
++                      eibrs             - Enhanced/Auto IBRS
++                      eibrs,retpoline   - Enhanced/Auto IBRS + Retpolines
++                      eibrs,lfence      - Enhanced/Auto IBRS + LFENCE
+                       ibrs              - use IBRS to protect kernel
+                       Not specifying this option is equivalent to
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -416,6 +416,7 @@
+ #define X86_FEATURE_SEV_ES            (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+ #define X86_FEATURE_SME_COHERENT      (19*32+10) /* "" AMD hardware-enforced cache coherency */
++#define X86_FEATURE_AUTOIBRS          (20*32+ 8) /* "" Automatic IBRS */
+ #define X86_FEATURE_SBPB              (20*32+27) /* "" Selective Branch Prediction Barrier */
+ #define X86_FEATURE_IBPB_BRTYPE               (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO           (20*32+29) /* "" CPU is not affected by SRSO */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -30,6 +30,7 @@
+ #define _EFER_SVME            12 /* Enable virtualization */
+ #define _EFER_LMSLE           13 /* Long Mode Segment Limit Enable */
+ #define _EFER_FFXSR           14 /* Enable Fast FXSAVE/FXRSTOR */
++#define _EFER_AUTOIBRS                21 /* Enable Automatic IBRS */
+ #define EFER_SCE              (1<<_EFER_SCE)
+ #define EFER_LME              (1<<_EFER_LME)
+@@ -38,6 +39,7 @@
+ #define EFER_SVME             (1<<_EFER_SVME)
+ #define EFER_LMSLE            (1<<_EFER_LMSLE)
+ #define EFER_FFXSR            (1<<_EFER_FFXSR)
++#define EFER_AUTOIBRS         (1<<_EFER_AUTOIBRS)
+ /* Intel MSRs. Some also available on other CPUs */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1330,9 +1330,9 @@ static const char * const spectre_v2_str
+       [SPECTRE_V2_NONE]                       = "Vulnerable",
+       [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
+       [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
+-      [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
+-      [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
+-      [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
++      [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced / Automatic IBRS",
++      [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
++      [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
+       [SPECTRE_V2_IBRS]                       = "Mitigation: IBRS",
+ };
+@@ -1401,7 +1401,7 @@ static enum spectre_v2_mitigation_cmd __
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+           !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+-              pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
++              pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+@@ -1586,8 +1586,12 @@ static void __init spectre_v2_select_mit
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+       if (spectre_v2_in_ibrs_mode(mode)) {
+-              x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+-              update_spec_ctrl(x86_spec_ctrl_base);
++              if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
++                      msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
++              } else {
++                      x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
++                      update_spec_ctrl(x86_spec_ctrl_base);
++              }
+       }
+       switch (mode) {
+@@ -1671,8 +1675,8 @@ static void __init spectre_v2_select_mit
+       /*
+        * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+        * and Enhanced IBRS protect firmware too, so enable IBRS around
+-       * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+-       * enabled.
++       * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
++       * otherwise enabled.
+        *
+        * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+        * the user might select retpoline on the kernel command line and if
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1102,8 +1102,8 @@ static const __initconst struct x86_cpu_
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+-      VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+-      VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++      VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
++      VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+       /* Zhaoxin Family 7 */
+       VULNWL(CENTAUR, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+@@ -1223,8 +1223,16 @@ static void __init cpu_set_bug_bits(stru
+          !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+-      if (ia32_cap & ARCH_CAP_IBRS_ALL)
++      /*
++       * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
++       * flag and protect from vendor-specific bugs via the whitelist.
++       */
++      if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
++              if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
++                  !(ia32_cap & ARCH_CAP_PBRSB_NO))
++                      setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
++      }
+       if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+           !(ia32_cap & ARCH_CAP_MDS_NO)) {
+@@ -1286,11 +1294,6 @@ static void __init cpu_set_bug_bits(stru
+                       setup_force_cpu_bug(X86_BUG_RETBLEED);
+       }
+-      if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+-          !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+-          !(ia32_cap & ARCH_CAP_PBRSB_NO))
+-              setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+-
+       if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
+               setup_force_cpu_bug(X86_BUG_SMT_RSB);
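
[ Editor's illustration; not part of the queued patch. A small userspace probe for the CPUID bit named in the commit message (CPUID Fn8000_0021 EAX[8]); enabling the feature is the kernel's job and is done by setting EFER[21], as in the bugs.c hunk above. ]

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* The extended leaf 0x80000021 must exist before it can be queried. */
	if (__get_cpuid_max(0x80000000, NULL) < 0x80000021) {
		puts("CPUID leaf 0x80000021 not implemented");
		return 0;
	}

	__cpuid_count(0x80000021, 0, eax, ebx, ecx, edx);
	printf("AMD Automatic IBRS (EAX bit 8): %s\n",
	       (eax & (1u << 8)) ? "supported" : "not supported");
	return 0;
}
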