From ee72d7ff01b5fd6ef6e58b0a4c6893b8c6cd6957 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 14 Jun 2022 19:26:40 +0200 Subject: [PATCH] 5.10-stable patches added patches: documentation-add-documentation-for-processor-mmio-stale-data.patch kvm-x86-speculation-disable-fill-buffer-clear-within-guests.patch x86-bugs-group-mds-taa-processor-mmio-stale-data-mitigations.patch x86-speculation-add-a-common-function-for-md_clear-mitigation-update.patch x86-speculation-mmio-add-mitigation-for-processor-mmio-stale-data.patch x86-speculation-mmio-add-sysfs-reporting-for-processor-mmio-stale-data.patch x86-speculation-mmio-enable-cpu-fill-buffer-clearing-on-idle.patch x86-speculation-mmio-enumerate-processor-mmio-stale-data-bug.patch x86-speculation-mmio-print-smt-warning.patch x86-speculation-mmio-reuse-srbds-mitigation-for-sbds.patch x86-speculation-srbds-update-srbds-mitigation-selection.patch --- ...tation-for-processor-mmio-stale-data.patch | 277 ++++++++++++++++ ...able-fill-buffer-clear-within-guests.patch | 229 +++++++++++++ ...rocessor-mmio-stale-data-mitigations.patch | 79 +++++ ...ction-for-md_clear-mitigation-update.patch | 135 ++++++++ ...gation-for-processor-mmio-stale-data.patch | 302 ++++++++++++++++++ ...orting-for-processor-mmio-stale-data.patch | 120 +++++++ ...ble-cpu-fill-buffer-clearing-on-idle.patch | 68 ++++ ...merate-processor-mmio-stale-data-bug.patch | 178 +++++++++++ ...6-speculation-mmio-print-smt-warning.patch | 46 +++ ...mmio-reuse-srbds-mitigation-for-sbds.patch | 81 +++++ ...ds-update-srbds-mitigation-selection.patch | 45 +++ 11 files changed, 1560 insertions(+) create mode 100644 queue-5.10/documentation-add-documentation-for-processor-mmio-stale-data.patch create mode 100644 queue-5.10/kvm-x86-speculation-disable-fill-buffer-clear-within-guests.patch create mode 100644 queue-5.10/x86-bugs-group-mds-taa-processor-mmio-stale-data-mitigations.patch create mode 100644 queue-5.10/x86-speculation-add-a-common-function-for-md_clear-mitigation-update.patch create mode 100644 queue-5.10/x86-speculation-mmio-add-mitigation-for-processor-mmio-stale-data.patch create mode 100644 queue-5.10/x86-speculation-mmio-add-sysfs-reporting-for-processor-mmio-stale-data.patch create mode 100644 queue-5.10/x86-speculation-mmio-enable-cpu-fill-buffer-clearing-on-idle.patch create mode 100644 queue-5.10/x86-speculation-mmio-enumerate-processor-mmio-stale-data-bug.patch create mode 100644 queue-5.10/x86-speculation-mmio-print-smt-warning.patch create mode 100644 queue-5.10/x86-speculation-mmio-reuse-srbds-mitigation-for-sbds.patch create mode 100644 queue-5.10/x86-speculation-srbds-update-srbds-mitigation-selection.patch diff --git a/queue-5.10/documentation-add-documentation-for-processor-mmio-stale-data.patch b/queue-5.10/documentation-add-documentation-for-processor-mmio-stale-data.patch new file mode 100644 index 00000000000..c60b67c0b2c --- /dev/null +++ b/queue-5.10/documentation-add-documentation-for-processor-mmio-stale-data.patch @@ -0,0 +1,277 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:26:07 -0700 +Subject: Documentation: Add documentation for Processor MMIO Stale Data + +From: Pawan Gupta + +commit 4419470191386456e0b8ed4eb06a70b0021798a6 upstream + +Add the admin guide for Processor MMIO stale data vulnerabilities. 
+ +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/hw-vuln/index.rst | 1 + Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst | 246 ++++++++++ + 2 files changed, 247 insertions(+) + create mode 100644 Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst + +--- a/Documentation/admin-guide/hw-vuln/index.rst ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -15,3 +15,4 @@ are configurable at compile, boot or run + tsx_async_abort + multihit.rst + special-register-buffer-data-sampling.rst ++ processor_mmio_stale_data.rst +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst +@@ -0,0 +1,246 @@ ++========================================= ++Processor MMIO Stale Data Vulnerabilities ++========================================= ++ ++Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O ++(MMIO) vulnerabilities that can expose data. The sequences of operations for ++exposing data range from simple to very complex. Because most of the ++vulnerabilities require the attacker to have access to MMIO, many environments ++are not affected. System environments using virtualization where MMIO access is ++provided to untrusted guests may need mitigation. These vulnerabilities are ++not transient execution attacks. However, these vulnerabilities may propagate ++stale data into core fill buffers where the data can subsequently be inferred ++by an unmitigated transient execution attack. Mitigation for these ++vulnerabilities includes a combination of microcode update and software ++changes, depending on the platform and usage model. Some of these mitigations ++are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or ++those used to mitigate Special Register Buffer Data Sampling (SRBDS). ++ ++Data Propagators ++================ ++Propagators are operations that result in stale data being copied or moved from ++one microarchitectural buffer or register to another. Processor MMIO Stale Data ++Vulnerabilities are operations that may result in stale data being directly ++read into an architectural, software-visible state or sampled from a buffer or ++register. ++ ++Fill Buffer Stale Data Propagator (FBSDP) ++----------------------------------------- ++Stale data may propagate from fill buffers (FB) into the non-coherent portion ++of the uncore on some non-coherent writes. Fill buffer propagation by itself ++does not make stale data architecturally visible. Stale data must be propagated ++to a location where it is subject to reading or sampling. ++ ++Sideband Stale Data Propagator (SSDP) ++------------------------------------- ++The sideband stale data propagator (SSDP) is limited to the client (including ++Intel Xeon server E3) uncore implementation. The sideband response buffer is ++shared by all client cores. For non-coherent reads that go to sideband ++destinations, the uncore logic returns 64 bytes of data to the core, including ++both requested data and unrequested stale data, from a transaction buffer and ++the sideband response buffer. As a result, stale data from the sideband ++response and transaction buffers may now reside in a core fill buffer. ++ ++Primary Stale Data Propagator (PSDP) ++------------------------------------ ++The primary stale data propagator (PSDP) is limited to the client (including ++Intel Xeon server E3) uncore implementation. 
Similar to the sideband response ++buffer, the primary response buffer is shared by all client cores. For some ++processors, MMIO primary reads will return 64 bytes of data to the core fill ++buffer including both requested data and unrequested stale data. This is ++similar to the sideband stale data propagator. ++ ++Vulnerabilities ++=============== ++Device Register Partial Write (DRPW) (CVE-2022-21166) ++----------------------------------------------------- ++Some endpoint MMIO registers incorrectly handle writes that are smaller than ++the register size. Instead of aborting the write or only copying the correct ++subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than ++specified by the write transaction may be written to the register. On ++processors affected by FBSDP, this may expose stale data from the fill buffers ++of the core that created the write transaction. ++ ++Shared Buffers Data Sampling (SBDS) (CVE-2022-21125) ++---------------------------------------------------- ++After propagators may have moved data around the uncore and copied stale data ++into client core fill buffers, processors affected by MFBDS can leak data from ++the fill buffer. It is limited to the client (including Intel Xeon server E3) ++uncore implementation. ++ ++Shared Buffers Data Read (SBDR) (CVE-2022-21123) ++------------------------------------------------ ++It is similar to Shared Buffer Data Sampling (SBDS) except that the data is ++directly read into the architectural software-visible state. It is limited to ++the client (including Intel Xeon server E3) uncore implementation. ++ ++Affected Processors ++=================== ++Not all the CPUs are affected by all the variants. For instance, most ++processors for the server market (excluding Intel Xeon E3 processors) are ++impacted by only Device Register Partial Write (DRPW). ++ ++Below is the list of affected Intel processors [#f1]_: ++ ++ =================== ============ ========= ++ Common name Family_Model Steppings ++ =================== ============ ========= ++ HASWELL_X 06_3FH 2,4 ++ SKYLAKE_L 06_4EH 3 ++ BROADWELL_X 06_4FH All ++ SKYLAKE_X 06_55H 3,4,6,7,11 ++ BROADWELL_D 06_56H 3,4,5 ++ SKYLAKE 06_5EH 3 ++ ICELAKE_X 06_6AH 4,5,6 ++ ICELAKE_D 06_6CH 1 ++ ICELAKE_L 06_7EH 5 ++ ATOM_TREMONT_D 06_86H All ++ LAKEFIELD 06_8AH 1 ++ KABYLAKE_L 06_8EH 9 to 12 ++ ATOM_TREMONT 06_96H 1 ++ ATOM_TREMONT_L 06_9CH 0 ++ KABYLAKE 06_9EH 9 to 13 ++ COMETLAKE 06_A5H 2,3,5 ++ COMETLAKE_L 06_A6H 0,1 ++ ROCKETLAKE 06_A7H 1 ++ =================== ============ ========= ++ ++If a CPU is in the affected processor list, but not affected by a variant, it ++is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later ++section, mitigation largely remains the same for all the variants, i.e. to ++clear the CPU fill buffers via VERW instruction. ++ ++New bits in MSRs ++================ ++Newer processors and microcode update on existing affected processors added new ++bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate ++specific variants of Processor MMIO Stale Data vulnerabilities and mitigation ++capability. ++ ++MSR IA32_ARCH_CAPABILITIES ++-------------------------- ++Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the ++ Shared Buffers Data Read (SBDR) vulnerability or the sideband stale ++ data propagator (SSDP). ++Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer ++ Stale Data Propagator (FBSDP). 
++Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data ++ Propagator (PSDP). ++Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer ++ values as part of MD_CLEAR operations. Processors that do not ++ enumerate MDS_NO (meaning they are affected by MDS) but that do ++ enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate ++ FB_CLEAR as part of their MD_CLEAR support. ++Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR ++ IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS ++ bit can be set to cause the VERW instruction to not perform the ++ FB_CLEAR action. Not all processors that support FB_CLEAR will support ++ FB_CLEAR_CTRL. ++ ++MSR IA32_MCU_OPT_CTRL ++--------------------- ++Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR ++action. This may be useful to reduce the performance impact of FB_CLEAR in ++cases where system software deems it warranted (for example, when performance ++is more critical, or the untrusted software has no MMIO access). Note that ++FB_CLEAR_DIS has no impact on enumeration (for example, it does not change ++FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors ++that enumerate FB_CLEAR. ++ ++Mitigation ++========== ++Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the ++same mitigation strategy to force the CPU to clear the affected buffers before ++an attacker can extract the secrets. ++ ++This is achieved by using the otherwise unused and obsolete VERW instruction in ++combination with a microcode update. The microcode clears the affected CPU ++buffers when the VERW instruction is executed. ++ ++Kernel reuses the MDS function to invoke the buffer clearing: ++ ++ mds_clear_cpu_buffers() ++ ++On MDS affected CPUs, the kernel already invokes CPU buffer clear on ++kernel/userspace, hypervisor/guest and C-state (idle) transitions. No ++additional mitigation is needed on such CPUs. ++ ++For CPUs not affected by MDS or TAA, mitigation is needed only for the attacker ++with MMIO capability. Therefore, VERW is not required for kernel/userspace. For ++virtualization case, VERW is only needed at VMENTER for a guest with MMIO ++capability. ++ ++Mitigation points ++----------------- ++Return to user space ++^^^^^^^^^^^^^^^^^^^^ ++Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation ++needed. ++ ++C-State transition ++^^^^^^^^^^^^^^^^^^ ++Control register writes by CPU during C-state transition can propagate data ++from fill buffer to uncore buffers. Execute VERW before C-state transition to ++clear CPU fill buffers. ++ ++Guest entry point ++^^^^^^^^^^^^^^^^^ ++Same mitigation as MDS when processor is also affected by MDS/TAA, otherwise ++execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by ++MDS/TAA, guest without MMIO access cannot extract secrets using Processor MMIO ++Stale Data vulnerabilities, so there is no need to execute VERW for such guests. ++ ++Mitigation control on the kernel command line ++--------------------------------------------- ++The kernel command line allows to control the Processor MMIO Stale Data ++mitigations at boot time with the option "mmio_stale_data=". The valid ++arguments for this option are: ++ ++ ========== ================================================================= ++ full If the CPU is vulnerable, enable mitigation; CPU buffer clearing ++ on exit to userspace and when entering a VM. 
Idle transitions are ++ protected as well. It does not automatically disable SMT. ++ full,nosmt Same as full, with SMT disabled on vulnerable CPUs. This is the ++ complete mitigation. ++ off Disables mitigation completely. ++ ========== ================================================================= ++ ++If the CPU is affected and mmio_stale_data=off is not supplied on the kernel ++command line, then the kernel selects the appropriate mitigation. ++ ++Mitigation status information ++----------------------------- ++The Linux kernel provides a sysfs interface to enumerate the current ++vulnerability status of the system: whether the system is vulnerable, and ++which mitigations are active. The relevant sysfs file is: ++ ++ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data ++ ++The possible values in this file are: ++ ++ .. list-table:: ++ ++ * - 'Not affected' ++ - The processor is not vulnerable ++ * - 'Vulnerable' ++ - The processor is vulnerable, but no mitigation enabled ++ * - 'Vulnerable: Clear CPU buffers attempted, no microcode' ++ - The processor is vulnerable, but microcode is not updated. The ++ mitigation is enabled on a best effort basis. ++ * - 'Mitigation: Clear CPU buffers' ++ - The processor is vulnerable and the CPU buffer clearing mitigation is ++ enabled. ++ ++If the processor is vulnerable then the following information is appended to ++the above information: ++ ++ ======================== =========================================== ++ 'SMT vulnerable' SMT is enabled ++ 'SMT disabled' SMT is disabled ++ 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown ++ ======================== =========================================== ++ ++References ++---------- ++.. [#f1] Affected Processors ++ https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html diff --git a/queue-5.10/kvm-x86-speculation-disable-fill-buffer-clear-within-guests.patch b/queue-5.10/kvm-x86-speculation-disable-fill-buffer-clear-within-guests.patch new file mode 100644 index 00000000000..717f61436ba --- /dev/null +++ b/queue-5.10/kvm-x86-speculation-disable-fill-buffer-clear-within-guests.patch @@ -0,0 +1,229 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:35:15 -0700 +Subject: KVM: x86/speculation: Disable Fill buffer clear within guests + +From: Pawan Gupta + +commit 027bbb884be006b05d9c577d6401686053aa789e upstream + +The enumeration of MD_CLEAR in CPUID(EAX=7,ECX=0).EDX{bit 10} is not an +accurate indicator on all CPUs of whether the VERW instruction will +overwrite fill buffers. FB_CLEAR enumeration in +IA32_ARCH_CAPABILITIES{bit 17} covers the case of CPUs that are not +vulnerable to MDS/TAA, indicating that microcode does overwrite fill +buffers. + +Guests running in VMM environments may not be aware of all the +capabilities/vulnerabilities of the host CPU. Specifically, a guest may +apply MDS/TAA mitigations when a virtual CPU is enumerated as vulnerable +to MDS/TAA even when the physical CPU is not. On CPUs that enumerate +FB_CLEAR_CTRL the VMM may set FB_CLEAR_DIS to skip overwriting of fill +buffers by the VERW instruction. This is done by setting FB_CLEAR_DIS +during VMENTER and resetting on VMEXIT. For guests that enumerate +FB_CLEAR (explicitly asking for fill buffer clear capability) the VMM +will not use FB_CLEAR_DIS. 
+ +Irrespective of guest state, host overwrites CPU buffers before VMENTER +to protect itself from an MMIO capable guest, as part of mitigation for +MMIO Stale Data vulnerabilities. + +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/msr-index.h | 6 ++ + arch/x86/kvm/vmx/vmx.c | 69 +++++++++++++++++++++++++++++++++ + arch/x86/kvm/vmx/vmx.h | 2 + arch/x86/kvm/x86.c | 3 + + tools/arch/x86/include/asm/msr-index.h | 6 ++ + 5 files changed, 86 insertions(+) + +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -133,6 +133,11 @@ + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ ++#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* ++ * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] ++ * bit available to control VERW ++ * behavior. ++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b + #define L1D_FLUSH BIT(0) /* +@@ -150,6 +155,7 @@ + /* SRBDS support */ + #define MSR_IA32_MCU_OPT_CTRL 0x00000123 + #define RNGDS_MITG_DIS BIT(0) ++#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ + + #define MSR_IA32_SYSENTER_CS 0x00000174 + #define MSR_IA32_SYSENTER_ESP 0x00000175 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -226,6 +226,9 @@ static const struct { + #define L1D_CACHE_ORDER 4 + static void *vmx_l1d_flush_pages; + ++/* Control for disabling CPU Fill buffer clear */ ++static bool __read_mostly vmx_fb_clear_ctrl_available; ++ + static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) + { + struct page *page; +@@ -357,6 +360,60 @@ static int vmentry_l1d_flush_get(char *s + return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); + } + ++static void vmx_setup_fb_clear_ctrl(void) ++{ ++ u64 msr; ++ ++ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && ++ !boot_cpu_has_bug(X86_BUG_MDS) && ++ !boot_cpu_has_bug(X86_BUG_TAA)) { ++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); ++ if (msr & ARCH_CAP_FB_CLEAR_CTRL) ++ vmx_fb_clear_ctrl_available = true; ++ } ++} ++ ++static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) ++{ ++ u64 msr; ++ ++ if (!vmx->disable_fb_clear) ++ return; ++ ++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr); ++ msr |= FB_CLEAR_DIS; ++ wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); ++ /* Cache the MSR value to avoid reading it later */ ++ vmx->msr_ia32_mcu_opt_ctrl = msr; ++} ++ ++static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) ++{ ++ if (!vmx->disable_fb_clear) ++ return; ++ ++ vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; ++ wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); ++} ++ ++static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) ++{ ++ vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; ++ ++ /* ++ * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS ++ * at VMEntry. Skip the MSR read/write when a guest has no use case to ++ * execute VERW. 
++ */ ++ if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || ++ ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && ++ (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && ++ (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && ++ (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && ++ (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) ++ vmx->disable_fb_clear = false; ++} ++ + static const struct kernel_param_ops vmentry_l1d_flush_ops = { + .set = vmentry_l1d_flush_set, + .get = vmentry_l1d_flush_get, +@@ -2211,6 +2268,10 @@ static int vmx_set_msr(struct kvm_vcpu * + ret = kvm_set_msr_common(vcpu, msr_info); + } + ++ /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ ++ if (msr_index == MSR_IA32_ARCH_CAPABILITIES) ++ vmx_update_fb_clear_dis(vcpu, vmx); ++ + return ret; + } + +@@ -4483,6 +4544,8 @@ static void vmx_vcpu_reset(struct kvm_vc + vpid_sync_context(vmx->vpid); + if (init_event) + vmx_clear_hlt(vcpu); ++ ++ vmx_update_fb_clear_dis(vcpu, vmx); + } + + static void enable_irq_window(struct kvm_vcpu *vcpu) +@@ -6658,6 +6721,8 @@ static noinstr void vmx_vcpu_enter_exit( + kvm_arch_has_assigned_device(vcpu->kvm)) + mds_clear_cpu_buffers(); + ++ vmx_disable_fb_clear(vmx); ++ + if (vcpu->arch.cr2 != native_read_cr2()) + native_write_cr2(vcpu->arch.cr2); + +@@ -6666,6 +6731,8 @@ static noinstr void vmx_vcpu_enter_exit( + + vcpu->arch.cr2 = native_read_cr2(); + ++ vmx_enable_fb_clear(vmx); ++ + /* + * VMEXIT disables interrupts (host state), but tracing and lockdep + * have them in state 'on' as recorded before entering guest mode. +@@ -8050,6 +8117,8 @@ static int __init vmx_init(void) + return r; + } + ++ vmx_setup_fb_clear_ctrl(); ++ + for_each_possible_cpu(cpu) { + INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); + +--- a/arch/x86/kvm/vmx/vmx.h ++++ b/arch/x86/kvm/vmx/vmx.h +@@ -300,6 +300,8 @@ struct vcpu_vmx { + u64 msr_ia32_feature_control; + u64 msr_ia32_feature_control_valid_bits; + u64 ept_pointer; ++ u64 msr_ia32_mcu_opt_ctrl; ++ bool disable_fb_clear; + + struct pt_desc pt_desc; + +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1415,6 +1415,9 @@ static u64 kvm_get_arch_capabilities(voi + */ + } + ++ /* Guests don't need to know "Fill buffer clear control" exists */ ++ data &= ~ARCH_CAP_FB_CLEAR_CTRL; ++ + return data; + } + +--- a/tools/arch/x86/include/asm/msr-index.h ++++ b/tools/arch/x86/include/asm/msr-index.h +@@ -133,6 +133,11 @@ + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ ++#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* ++ * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] ++ * bit available to control VERW ++ * behavior. 
++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b + #define L1D_FLUSH BIT(0) /* +@@ -150,6 +155,7 @@ + /* SRBDS support */ + #define MSR_IA32_MCU_OPT_CTRL 0x00000123 + #define RNGDS_MITG_DIS BIT(0) ++#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ + + #define MSR_IA32_SYSENTER_CS 0x00000174 + #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/queue-5.10/x86-bugs-group-mds-taa-processor-mmio-stale-data-mitigations.patch b/queue-5.10/x86-bugs-group-mds-taa-processor-mmio-stale-data-mitigations.patch new file mode 100644 index 00000000000..a81ab7cb2da --- /dev/null +++ b/queue-5.10/x86-bugs-group-mds-taa-processor-mmio-stale-data-mitigations.patch @@ -0,0 +1,79 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:30:12 -0700 +Subject: x86/bugs: Group MDS, TAA & Processor MMIO Stale Data mitigations + +From: Pawan Gupta + +commit e5925fb867290ee924fcf2fe3ca887b792714366 upstream + +MDS, TAA and Processor MMIO Stale Data mitigations rely on clearing CPU +buffers. Moreover, status of these mitigations affects each other. +During boot, it is important to maintain the order in which these +mitigations are selected. This is especially true for +md_clear_update_mitigation() that needs to be called after MDS, TAA and +Processor MMIO Stale Data mitigation selection is done. + +Introduce md_clear_select_mitigation(), and select all these mitigations +from there. This reflects relationships between these mitigations and +ensures proper ordering. + +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 26 ++++++++++++++++---------- + 1 file changed, 16 insertions(+), 10 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -42,6 +42,7 @@ static void __init ssb_select_mitigation + static void __init l1tf_select_mitigation(void); + static void __init mds_select_mitigation(void); + static void __init md_clear_update_mitigation(void); ++static void __init md_clear_select_mitigation(void); + static void __init taa_select_mitigation(void); + static void __init mmio_select_mitigation(void); + static void __init srbds_select_mitigation(void); +@@ -114,18 +115,9 @@ void __init check_bugs(void) + spectre_v2_select_mitigation(); + ssb_select_mitigation(); + l1tf_select_mitigation(); +- mds_select_mitigation(); +- taa_select_mitigation(); +- mmio_select_mitigation(); ++ md_clear_select_mitigation(); + srbds_select_mitigation(); + +- /* +- * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update +- * and print their mitigation after MDS, TAA and MMIO Stale Data +- * mitigation selection is done. +- */ +- md_clear_update_mitigation(); +- + arch_smt_update(); + + #ifdef CONFIG_X86_32 +@@ -511,6 +503,20 @@ out: + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); + } + ++static void __init md_clear_select_mitigation(void) ++{ ++ mds_select_mitigation(); ++ taa_select_mitigation(); ++ mmio_select_mitigation(); ++ ++ /* ++ * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update ++ * and print their mitigation after MDS, TAA and MMIO Stale Data ++ * mitigation selection is done. 
++ */ ++ md_clear_update_mitigation(); ++} ++ + #undef pr_fmt + #define pr_fmt(fmt) "SRBDS: " fmt + diff --git a/queue-5.10/x86-speculation-add-a-common-function-for-md_clear-mitigation-update.patch b/queue-5.10/x86-speculation-add-a-common-function-for-md_clear-mitigation-update.patch new file mode 100644 index 00000000000..6f69ee080dc --- /dev/null +++ b/queue-5.10/x86-speculation-add-a-common-function-for-md_clear-mitigation-update.patch @@ -0,0 +1,135 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:28:10 -0700 +Subject: x86/speculation: Add a common function for MD_CLEAR mitigation update + +From: Pawan Gupta + +commit f52ea6c26953fed339aa4eae717ee5c2133c7ff2 upstream + +Processor MMIO Stale Data mitigation uses similar mitigation as MDS and +TAA. In preparation for adding its mitigation, add a common function to +update all mitigations that depend on MD_CLEAR. + + [ bp: Add a newline in md_clear_update_mitigation() to separate + statements better. ] + +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 59 +++++++++++++++++++++++++-------------------- + 1 file changed, 33 insertions(+), 26 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -41,7 +41,7 @@ static void __init spectre_v2_select_mit + static void __init ssb_select_mitigation(void); + static void __init l1tf_select_mitigation(void); + static void __init mds_select_mitigation(void); +-static void __init mds_print_mitigation(void); ++static void __init md_clear_update_mitigation(void); + static void __init taa_select_mitigation(void); + static void __init srbds_select_mitigation(void); + +@@ -114,10 +114,10 @@ void __init check_bugs(void) + srbds_select_mitigation(); + + /* +- * As MDS and TAA mitigations are inter-related, print MDS +- * mitigation until after TAA mitigation selection is done. ++ * As MDS and TAA mitigations are inter-related, update and print their ++ * mitigation after TAA mitigation selection is done. + */ +- mds_print_mitigation(); ++ md_clear_update_mitigation(); + + arch_smt_update(); + +@@ -258,14 +258,6 @@ static void __init mds_select_mitigation + } + } + +-static void __init mds_print_mitigation(void) +-{ +- if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) +- return; +- +- pr_info("%s\n", mds_strings[mds_mitigation]); +-} +- + static int __init mds_cmdline(char *str) + { + if (!boot_cpu_has_bug(X86_BUG_MDS)) +@@ -320,7 +312,7 @@ static void __init taa_select_mitigation + /* TSX previously disabled by tsx=off */ + if (!boot_cpu_has(X86_FEATURE_RTM)) { + taa_mitigation = TAA_MITIGATION_TSX_DISABLED; +- goto out; ++ return; + } + + if (cpu_mitigations_off()) { +@@ -334,7 +326,7 @@ static void __init taa_select_mitigation + */ + if (taa_mitigation == TAA_MITIGATION_OFF && + mds_mitigation == MDS_MITIGATION_OFF) +- goto out; ++ return; + + if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) + taa_mitigation = TAA_MITIGATION_VERW; +@@ -366,18 +358,6 @@ static void __init taa_select_mitigation + + if (taa_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); +- +- /* +- * Update MDS mitigation, if necessary, as the mds_user_clear is +- * now enabled for TAA mitigation. 
+- */ +- if (mds_mitigation == MDS_MITIGATION_OFF && +- boot_cpu_has_bug(X86_BUG_MDS)) { +- mds_mitigation = MDS_MITIGATION_FULL; +- mds_select_mitigation(); +- } +-out: +- pr_info("%s\n", taa_strings[taa_mitigation]); + } + + static int __init tsx_async_abort_parse_cmdline(char *str) +@@ -402,6 +382,33 @@ static int __init tsx_async_abort_parse_ + early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); + + #undef pr_fmt ++#define pr_fmt(fmt) "" fmt ++ ++static void __init md_clear_update_mitigation(void) ++{ ++ if (cpu_mitigations_off()) ++ return; ++ ++ if (!static_key_enabled(&mds_user_clear)) ++ goto out; ++ ++ /* ++ * mds_user_clear is now enabled. Update MDS mitigation, if ++ * necessary. ++ */ ++ if (mds_mitigation == MDS_MITIGATION_OFF && ++ boot_cpu_has_bug(X86_BUG_MDS)) { ++ mds_mitigation = MDS_MITIGATION_FULL; ++ mds_select_mitigation(); ++ } ++out: ++ if (boot_cpu_has_bug(X86_BUG_MDS)) ++ pr_info("MDS: %s\n", mds_strings[mds_mitigation]); ++ if (boot_cpu_has_bug(X86_BUG_TAA)) ++ pr_info("TAA: %s\n", taa_strings[taa_mitigation]); ++} ++ ++#undef pr_fmt + #define pr_fmt(fmt) "SRBDS: " fmt + + enum srbds_mitigations { diff --git a/queue-5.10/x86-speculation-mmio-add-mitigation-for-processor-mmio-stale-data.patch b/queue-5.10/x86-speculation-mmio-add-mitigation-for-processor-mmio-stale-data.patch new file mode 100644 index 00000000000..26cc3d1d921 --- /dev/null +++ b/queue-5.10/x86-speculation-mmio-add-mitigation-for-processor-mmio-stale-data.patch @@ -0,0 +1,302 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:29:11 -0700 +Subject: x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data + +From: Pawan Gupta + +commit 8cb861e9e3c9a55099ad3d08e1a3b653d29c33ca upstream + +Processor MMIO Stale Data is a class of vulnerabilities that may +expose data after an MMIO operation. For details please refer to +Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst. + +These vulnerabilities are broadly categorized as: + +Device Register Partial Write (DRPW): + Some endpoint MMIO registers incorrectly handle writes that are + smaller than the register size. Instead of aborting the write or only + copying the correct subset of bytes (for example, 2 bytes for a 2-byte + write), more bytes than specified by the write transaction may be + written to the register. On some processors, this may expose stale + data from the fill buffers of the core that created the write + transaction. + +Shared Buffers Data Sampling (SBDS): + After propagators may have moved data around the uncore and copied + stale data into client core fill buffers, processors affected by MFBDS + can leak data from the fill buffer. + +Shared Buffers Data Read (SBDR): + It is similar to Shared Buffer Data Sampling (SBDS) except that the + data is directly read into the architectural software-visible state. + +An attacker can use these vulnerabilities to extract data from CPU fill +buffers using MDS and TAA methods. Mitigate it by clearing the CPU fill +buffers using the VERW instruction before returning to a user or a +guest. + +On CPUs not affected by MDS and TAA, user application cannot sample data +from CPU fill buffers using MDS or TAA. A guest with MMIO access can +still use DRPW or SBDR to extract data architecturally. Mitigate it with +VERW instruction to clear fill buffers before VMENTER for MMIO capable +guests. + +Add a kernel parameter mmio_stale_data={off|full|full,nosmt} to control +the mitigation. 
+ +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/kernel-parameters.txt | 36 +++++++ + arch/x86/include/asm/nospec-branch.h | 2 + arch/x86/kernel/cpu/bugs.c | 111 +++++++++++++++++++++++- + arch/x86/kvm/vmx/vmx.c | 3 + 4 files changed, 148 insertions(+), 4 deletions(-) + +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2872,6 +2872,7 @@ + kvm.nx_huge_pages=off [X86] + no_entry_flush [PPC] + no_uaccess_flush [PPC] ++ mmio_stale_data=off [X86] + + Exceptions: + This does not have any effect on +@@ -2893,6 +2894,7 @@ + Equivalent to: l1tf=flush,nosmt [X86] + mds=full,nosmt [X86] + tsx_async_abort=full,nosmt [X86] ++ mmio_stale_data=full,nosmt [X86] + + mminit_loglevel= + [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this +@@ -2902,6 +2904,40 @@ + log everything. Information is printed at KERN_DEBUG + so loglevel=8 may also need to be specified. + ++ mmio_stale_data= ++ [X86,INTEL] Control mitigation for the Processor ++ MMIO Stale Data vulnerabilities. ++ ++ Processor MMIO Stale Data is a class of ++ vulnerabilities that may expose data after an MMIO ++ operation. Exposed data could originate or end in ++ the same CPU buffers as affected by MDS and TAA. ++ Therefore, similar to MDS and TAA, the mitigation ++ is to clear the affected CPU buffers. ++ ++ This parameter controls the mitigation. The ++ options are: ++ ++ full - Enable mitigation on vulnerable CPUs ++ ++ full,nosmt - Enable mitigation and disable SMT on ++ vulnerable CPUs. ++ ++ off - Unconditionally disable mitigation ++ ++ On MDS or TAA affected machines, ++ mmio_stale_data=off can be prevented by an active ++ MDS or TAA mitigation as these vulnerabilities are ++ mitigated with the same mechanism so in order to ++ disable this mitigation, you need to specify ++ mds=off and tsx_async_abort=off too. ++ ++ Not specifying this option is equivalent to ++ mmio_stale_data=full. ++ ++ For details see: ++ Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst ++ + module.sig_enforce + [KNL] When CONFIG_MODULE_SIG is set, this means that + modules without (valid) signatures will fail to load. +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -255,6 +255,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_alway + DECLARE_STATIC_KEY_FALSE(mds_user_clear); + DECLARE_STATIC_KEY_FALSE(mds_idle_clear); + ++DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); ++ + #include + + /** +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -43,6 +43,7 @@ static void __init l1tf_select_mitigatio + static void __init mds_select_mitigation(void); + static void __init md_clear_update_mitigation(void); + static void __init taa_select_mitigation(void); ++static void __init mmio_select_mitigation(void); + static void __init srbds_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR that always has to be preserved. 
*/ +@@ -77,6 +78,10 @@ EXPORT_SYMBOL_GPL(mds_user_clear); + DEFINE_STATIC_KEY_FALSE(mds_idle_clear); + EXPORT_SYMBOL_GPL(mds_idle_clear); + ++/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ ++DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); ++EXPORT_SYMBOL_GPL(mmio_stale_data_clear); ++ + void __init check_bugs(void) + { + identify_boot_cpu(); +@@ -111,11 +116,13 @@ void __init check_bugs(void) + l1tf_select_mitigation(); + mds_select_mitigation(); + taa_select_mitigation(); ++ mmio_select_mitigation(); + srbds_select_mitigation(); + + /* +- * As MDS and TAA mitigations are inter-related, update and print their +- * mitigation after TAA mitigation selection is done. ++ * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update ++ * and print their mitigation after MDS, TAA and MMIO Stale Data ++ * mitigation selection is done. + */ + md_clear_update_mitigation(); + +@@ -382,6 +389,90 @@ static int __init tsx_async_abort_parse_ + early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); + + #undef pr_fmt ++#define pr_fmt(fmt) "MMIO Stale Data: " fmt ++ ++enum mmio_mitigations { ++ MMIO_MITIGATION_OFF, ++ MMIO_MITIGATION_UCODE_NEEDED, ++ MMIO_MITIGATION_VERW, ++}; ++ ++/* Default mitigation for Processor MMIO Stale Data vulnerabilities */ ++static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; ++static bool mmio_nosmt __ro_after_init = false; ++ ++static const char * const mmio_strings[] = { ++ [MMIO_MITIGATION_OFF] = "Vulnerable", ++ [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", ++ [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", ++}; ++ ++static void __init mmio_select_mitigation(void) ++{ ++ u64 ia32_cap; ++ ++ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || ++ cpu_mitigations_off()) { ++ mmio_mitigation = MMIO_MITIGATION_OFF; ++ return; ++ } ++ ++ if (mmio_mitigation == MMIO_MITIGATION_OFF) ++ return; ++ ++ ia32_cap = x86_read_arch_cap_msr(); ++ ++ /* ++ * Enable CPU buffer clear mitigation for host and VMM, if also affected ++ * by MDS or TAA. Otherwise, enable mitigation for VMM only. ++ */ ++ if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && ++ boot_cpu_has(X86_FEATURE_RTM))) ++ static_branch_enable(&mds_user_clear); ++ else ++ static_branch_enable(&mmio_stale_data_clear); ++ ++ /* ++ * Check if the system has the right microcode. ++ * ++ * CPU Fill buffer clear mitigation is enumerated by either an explicit ++ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS ++ * affected systems. 
++ */ ++ if ((ia32_cap & ARCH_CAP_FB_CLEAR) || ++ (boot_cpu_has(X86_FEATURE_MD_CLEAR) && ++ boot_cpu_has(X86_FEATURE_FLUSH_L1D) && ++ !(ia32_cap & ARCH_CAP_MDS_NO))) ++ mmio_mitigation = MMIO_MITIGATION_VERW; ++ else ++ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; ++ ++ if (mmio_nosmt || cpu_mitigations_auto_nosmt()) ++ cpu_smt_disable(false); ++} ++ ++static int __init mmio_stale_data_parse_cmdline(char *str) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) ++ return 0; ++ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "off")) { ++ mmio_mitigation = MMIO_MITIGATION_OFF; ++ } else if (!strcmp(str, "full")) { ++ mmio_mitigation = MMIO_MITIGATION_VERW; ++ } else if (!strcmp(str, "full,nosmt")) { ++ mmio_mitigation = MMIO_MITIGATION_VERW; ++ mmio_nosmt = true; ++ } ++ ++ return 0; ++} ++early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); ++ ++#undef pr_fmt + #define pr_fmt(fmt) "" fmt + + static void __init md_clear_update_mitigation(void) +@@ -393,19 +484,31 @@ static void __init md_clear_update_mitig + goto out; + + /* +- * mds_user_clear is now enabled. Update MDS mitigation, if +- * necessary. ++ * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data ++ * mitigation, if necessary. + */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } ++ if (taa_mitigation == TAA_MITIGATION_OFF && ++ boot_cpu_has_bug(X86_BUG_TAA)) { ++ taa_mitigation = TAA_MITIGATION_VERW; ++ taa_select_mitigation(); ++ } ++ if (mmio_mitigation == MMIO_MITIGATION_OFF && ++ boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { ++ mmio_mitigation = MMIO_MITIGATION_VERW; ++ mmio_select_mitigation(); ++ } + out: + if (boot_cpu_has_bug(X86_BUG_MDS)) + pr_info("MDS: %s\n", mds_strings[mds_mitigation]); + if (boot_cpu_has_bug(X86_BUG_TAA)) + pr_info("TAA: %s\n", taa_strings[taa_mitigation]); ++ if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) ++ pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); + } + + #undef pr_fmt +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -6654,6 +6654,9 @@ static noinstr void vmx_vcpu_enter_exit( + vmx_l1d_flush(vcpu); + else if (static_branch_unlikely(&mds_user_clear)) + mds_clear_cpu_buffers(); ++ else if (static_branch_unlikely(&mmio_stale_data_clear) && ++ kvm_arch_has_assigned_device(vcpu->kvm)) ++ mds_clear_cpu_buffers(); + + if (vcpu->arch.cr2 != native_read_cr2()) + native_write_cr2(vcpu->arch.cr2); diff --git a/queue-5.10/x86-speculation-mmio-add-sysfs-reporting-for-processor-mmio-stale-data.patch b/queue-5.10/x86-speculation-mmio-add-sysfs-reporting-for-processor-mmio-stale-data.patch new file mode 100644 index 00000000000..3d05998cf94 --- /dev/null +++ b/queue-5.10/x86-speculation-mmio-add-sysfs-reporting-for-processor-mmio-stale-data.patch @@ -0,0 +1,120 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:32:13 -0700 +Subject: x86/speculation/mmio: Add sysfs reporting for Processor MMIO Stale Data + +From: Pawan Gupta + +commit 8d50cdf8b8341770bc6367bce40c0c1bb0e1d5b3 upstream + +Add the sysfs reporting file for Processor MMIO Stale Data +vulnerability. It exposes the vulnerability and mitigation state similar +to the existing files for the other hardware vulnerabilities. 
+ +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/ABI/testing/sysfs-devices-system-cpu | 1 + arch/x86/kernel/cpu/bugs.c | 22 +++++++++++++++++++++ + drivers/base/cpu.c | 8 +++++++ + include/linux/cpu.h | 3 ++ + 4 files changed, 34 insertions(+) + +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -510,6 +510,7 @@ What: /sys/devices/system/cpu/vulnerabi + /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort + /sys/devices/system/cpu/vulnerabilities/itlb_multihit ++ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + Date: January 2018 + Contact: Linux kernel mailing list + Description: Information about CPU vulnerabilities +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1832,6 +1832,20 @@ static ssize_t tsx_async_abort_show_stat + sched_smt_active() ? "vulnerable" : "disabled"); + } + ++static ssize_t mmio_stale_data_show_state(char *buf) ++{ ++ if (mmio_mitigation == MMIO_MITIGATION_OFF) ++ return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); ++ ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { ++ return sysfs_emit(buf, "%s; SMT Host state unknown\n", ++ mmio_strings[mmio_mitigation]); ++ } ++ ++ return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], ++ sched_smt_active() ? "vulnerable" : "disabled"); ++} ++ + static char *stibp_state(void) + { + if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) +@@ -1932,6 +1946,9 @@ static ssize_t cpu_show_common(struct de + case X86_BUG_SRBDS: + return srbds_show_state(buf); + ++ case X86_BUG_MMIO_STALE_DATA: ++ return mmio_stale_data_show_state(buf); ++ + default: + break; + } +@@ -1983,4 +2000,9 @@ ssize_t cpu_show_srbds(struct device *de + { + return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); + } ++ ++ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); ++} + #endif +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -566,6 +566,12 @@ ssize_t __weak cpu_show_srbds(struct dev + return sysfs_emit(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sysfs_emit(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +@@ -575,6 +581,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_m + static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); + static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); + static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); ++static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -586,6 +593,7 @@ static struct attribute *cpu_root_vulner + &dev_attr_tsx_async_abort.attr, + &dev_attr_itlb_multihit.attr, + &dev_attr_srbds.attr, ++ &dev_attr_mmio_stale_data.attr, + NULL + }; + +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -65,6 +65,9 @@ extern ssize_t cpu_show_tsx_async_abort( + extern ssize_t cpu_show_itlb_multihit(struct device *dev, + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_srbds(struct 
device *dev, struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_mmio_stale_data(struct device *dev, ++ struct device_attribute *attr, ++ char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/queue-5.10/x86-speculation-mmio-enable-cpu-fill-buffer-clearing-on-idle.patch b/queue-5.10/x86-speculation-mmio-enable-cpu-fill-buffer-clearing-on-idle.patch new file mode 100644 index 00000000000..6884b87a2a2 --- /dev/null +++ b/queue-5.10/x86-speculation-mmio-enable-cpu-fill-buffer-clearing-on-idle.patch @@ -0,0 +1,68 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:31:12 -0700 +Subject: x86/speculation/mmio: Enable CPU Fill buffer clearing on idle + +From: Pawan Gupta + +commit 99a83db5a605137424e1efe29dc0573d6a5b6316 upstream + +When the CPU is affected by Processor MMIO Stale Data vulnerabilities, +Fill Buffer Stale Data Propagator (FBSDP) can propagate stale data out +of Fill buffer to uncore buffer when CPU goes idle. Stale data can then +be exploited with other variants using MMIO operations. + +Mitigate it by clearing the Fill buffer before entering idle state. + +Signed-off-by: Pawan Gupta +Signed-off-by: Thomas Gleixner +Co-developed-by: Josh Poimboeuf +Signed-off-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -425,6 +425,14 @@ static void __init mmio_select_mitigatio + static_branch_enable(&mmio_stale_data_clear); + + /* ++ * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can ++ * be propagated to uncore buffers, clearing the Fill buffers on idle ++ * is required irrespective of SMT state. ++ */ ++ if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) ++ static_branch_enable(&mds_idle_clear); ++ ++ /* + * Check if the system has the right microcode. + * + * CPU Fill buffer clear mitigation is enumerated by either an explicit +@@ -1188,6 +1196,8 @@ static void update_indir_branch_cond(voi + /* Update the static key controlling the MDS CPU buffer clear in idle */ + static void update_mds_branch_idle(void) + { ++ u64 ia32_cap = x86_read_arch_cap_msr(); ++ + /* + * Enable the idle clearing if SMT is active on CPUs which are + * affected only by MSBDS and not any other MDS variant. +@@ -1199,10 +1209,12 @@ static void update_mds_branch_idle(void) + if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) + return; + +- if (sched_smt_active()) ++ if (sched_smt_active()) { + static_branch_enable(&mds_idle_clear); +- else ++ } else if (mmio_mitigation == MMIO_MITIGATION_OFF || ++ (ia32_cap & ARCH_CAP_FBSDP_NO)) { + static_branch_disable(&mds_idle_clear); ++ } + } + + #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" diff --git a/queue-5.10/x86-speculation-mmio-enumerate-processor-mmio-stale-data-bug.patch b/queue-5.10/x86-speculation-mmio-enumerate-processor-mmio-stale-data-bug.patch new file mode 100644 index 00000000000..5244a998726 --- /dev/null +++ b/queue-5.10/x86-speculation-mmio-enumerate-processor-mmio-stale-data-bug.patch @@ -0,0 +1,178 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:27:08 -0700 +Subject: x86/speculation/mmio: Enumerate Processor MMIO Stale Data bug + +From: Pawan Gupta + +commit 51802186158c74a0304f51ab963e7c2b3a2b046f upstream + +Processor MMIO Stale Data is a class of vulnerabilities that may +expose data after an MMIO operation. For more details please refer to +Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst + +Add the Processor MMIO Stale Data bug enumeration. A microcode update +adds new bits to the MSR IA32_ARCH_CAPABILITIES, define them. + +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/msr-index.h | 19 +++++++++++++ + arch/x86/kernel/cpu/common.c | 43 +++++++++++++++++++++++++++++-- + tools/arch/x86/include/asm/cpufeatures.h | 1 + tools/arch/x86/include/asm/msr-index.h | 19 +++++++++++++ + 5 files changed, 81 insertions(+), 2 deletions(-) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -417,5 +417,6 @@ + #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ + #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ + #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ ++#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -114,6 +114,25 @@ + * Not susceptible to + * TSX Async Abort (TAA) vulnerabilities. + */ ++#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* ++ * Not susceptible to SBDR and SSDP ++ * variants of Processor MMIO stale data ++ * vulnerabilities. ++ */ ++#define ARCH_CAP_FBSDP_NO BIT(14) /* ++ * Not susceptible to FBSDP variant of ++ * Processor MMIO stale data ++ * vulnerabilities. ++ */ ++#define ARCH_CAP_PSDP_NO BIT(15) /* ++ * Not susceptible to PSDP variant of ++ * Processor MMIO stale data ++ * vulnerabilities. ++ */ ++#define ARCH_CAP_FB_CLEAR BIT(17) /* ++ * VERW clears CPU fill buffer ++ * even on MDS_NO CPUs. 
++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b + #define L1D_FLUSH BIT(0) /* +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1098,18 +1098,39 @@ static const __initconst struct x86_cpu_ + X86_FEATURE_ANY, issues) + + #define SRBDS BIT(0) ++/* CPU is affected by X86_BUG_MMIO_STALE_DATA */ ++#define MMIO BIT(1) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x5), MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) | ++ BIT(7) | BIT(0xB), MMIO), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x1, 0x1), MMIO), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x1), MMIO), ++ VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO), ++ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO), ++ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO), ++ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), ++ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO), + {} + }; + +@@ -1130,6 +1151,13 @@ u64 x86_read_arch_cap_msr(void) + return ia32_cap; + } + ++static bool arch_cap_mmio_immune(u64 ia32_cap) ++{ ++ return (ia32_cap & ARCH_CAP_FBSDP_NO && ++ ia32_cap & ARCH_CAP_PSDP_NO && ++ ia32_cap & ARCH_CAP_SBDR_SSDP_NO); ++} ++ + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + { + u64 ia32_cap = x86_read_arch_cap_msr(); +@@ -1189,6 +1217,17 @@ static void __init cpu_set_bug_bits(stru + cpu_matches(cpu_vuln_blacklist, SRBDS)) + setup_force_cpu_bug(X86_BUG_SRBDS); + ++ /* ++ * Processor MMIO Stale Data bug enumeration ++ * ++ * Affected CPU list is generally enough to enumerate the vulnerability, ++ * but for virtualization case check for ARCH_CAP MSR bits also, VMM may ++ * not want the guest to enumerate the bug. 
++ */ ++ if (cpu_matches(cpu_vuln_blacklist, MMIO) && ++ !arch_cap_mmio_immune(ia32_cap)) ++ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +--- a/tools/arch/x86/include/asm/cpufeatures.h ++++ b/tools/arch/x86/include/asm/cpufeatures.h +@@ -417,5 +417,6 @@ + #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ + #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ + #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ ++#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +--- a/tools/arch/x86/include/asm/msr-index.h ++++ b/tools/arch/x86/include/asm/msr-index.h +@@ -114,6 +114,25 @@ + * Not susceptible to + * TSX Async Abort (TAA) vulnerabilities. + */ ++#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* ++ * Not susceptible to SBDR and SSDP ++ * variants of Processor MMIO stale data ++ * vulnerabilities. ++ */ ++#define ARCH_CAP_FBSDP_NO BIT(14) /* ++ * Not susceptible to FBSDP variant of ++ * Processor MMIO stale data ++ * vulnerabilities. ++ */ ++#define ARCH_CAP_PSDP_NO BIT(15) /* ++ * Not susceptible to PSDP variant of ++ * Processor MMIO stale data ++ * vulnerabilities. ++ */ ++#define ARCH_CAP_FB_CLEAR BIT(17) /* ++ * VERW clears CPU fill buffer ++ * even on MDS_NO CPUs. ++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b + #define L1D_FLUSH BIT(0) /* diff --git a/queue-5.10/x86-speculation-mmio-print-smt-warning.patch b/queue-5.10/x86-speculation-mmio-print-smt-warning.patch new file mode 100644 index 00000000000..3d3defed739 --- /dev/null +++ b/queue-5.10/x86-speculation-mmio-print-smt-warning.patch @@ -0,0 +1,46 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Josh Poimboeuf +Date: Mon, 23 May 2022 09:11:49 -0700 +Subject: x86/speculation/mmio: Print SMT warning + +From: Josh Poimboeuf + +commit 1dc6ff02c8bf77d71b9b5d11cbc9df77cfb28626 upstream + +Similar to MDS and TAA, print a warning if SMT is enabled for the MMIO +Stale Data vulnerability. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1221,6 +1221,7 @@ static void update_mds_branch_idle(void) + + #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" + #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" ++#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" + + void cpu_bugs_smt_update(void) + { +@@ -1265,6 +1266,16 @@ void cpu_bugs_smt_update(void) + break; + } + ++ switch (mmio_mitigation) { ++ case MMIO_MITIGATION_VERW: ++ case MMIO_MITIGATION_UCODE_NEEDED: ++ if (sched_smt_active()) ++ pr_warn_once(MMIO_MSG_SMT); ++ break; ++ case MMIO_MITIGATION_OFF: ++ break; ++ } ++ + mutex_unlock(&spec_ctrl_mutex); + } + diff --git a/queue-5.10/x86-speculation-mmio-reuse-srbds-mitigation-for-sbds.patch b/queue-5.10/x86-speculation-mmio-reuse-srbds-mitigation-for-sbds.patch new file mode 100644 index 00000000000..3fe0d3cebf8 --- /dev/null +++ b/queue-5.10/x86-speculation-mmio-reuse-srbds-mitigation-for-sbds.patch @@ -0,0 +1,81 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:34:14 -0700 +Subject: x86/speculation/mmio: Reuse SRBDS mitigation for SBDS + +From: Pawan Gupta + +commit a992b8a4682f119ae035a01b40d4d0665c4a2875 upstream + +The Shared Buffers Data Sampling (SBDS) variant of Processor MMIO Stale +Data vulnerabilities may expose RDRAND, RDSEED and SGX EGETKEY data. +Mitigation for this is added by a microcode update. + +As some of the implications of SBDS are similar to SRBDS, SRBDS mitigation +infrastructure can be leveraged by SBDS. Set X86_BUG_SRBDS and use SRBDS +mitigation. + +Mitigation is enabled by default; use srbds=off to opt-out. Mitigation +status can be checked from below file: + + /sys/devices/system/cpu/vulnerabilities/srbds + +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/common.c | 21 ++++++++++++++------- + 1 file changed, 14 insertions(+), 7 deletions(-) + +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1100,6 +1100,8 @@ static const __initconst struct x86_cpu_ + #define SRBDS BIT(0) + /* CPU is affected by X86_BUG_MMIO_STALE_DATA */ + #define MMIO BIT(1) ++/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ ++#define MMIO_SBDS BIT(2) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +@@ -1121,16 +1123,17 @@ static const struct x86_cpu_id cpu_vuln_ + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS), +- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), +- VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO), +- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x1), MMIO), +- VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO), ++ VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO), +- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO), ++ 
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), +- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO), ++ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS), + {} + }; + +@@ -1211,10 +1214,14 @@ static void __init cpu_set_bug_bits(stru + /* + * SRBDS affects CPUs which support RDRAND or RDSEED and are listed + * in the vulnerability blacklist. ++ * ++ * Some of the implications and mitigation of Shared Buffers Data ++ * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as ++ * SRBDS. + */ + if ((cpu_has(c, X86_FEATURE_RDRAND) || + cpu_has(c, X86_FEATURE_RDSEED)) && +- cpu_matches(cpu_vuln_blacklist, SRBDS)) ++ cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) + setup_force_cpu_bug(X86_BUG_SRBDS); + + /* diff --git a/queue-5.10/x86-speculation-srbds-update-srbds-mitigation-selection.patch b/queue-5.10/x86-speculation-srbds-update-srbds-mitigation-selection.patch new file mode 100644 index 00000000000..27db320ed62 --- /dev/null +++ b/queue-5.10/x86-speculation-srbds-update-srbds-mitigation-selection.patch @@ -0,0 +1,45 @@ +From foo@baz Tue Jun 14 07:06:51 PM CEST 2022 +From: Pawan Gupta +Date: Thu, 19 May 2022 20:33:13 -0700 +Subject: x86/speculation/srbds: Update SRBDS mitigation selection + +From: Pawan Gupta + +commit 22cac9c677c95f3ac5c9244f8ca0afdc7c8afb19 upstream + +Currently, Linux disables SRBDS mitigation on CPUs not affected by +MDS and have the TSX feature disabled. On such CPUs, secrets cannot +be extracted from CPU fill buffers using MDS or TAA. Without SRBDS +mitigation, Processor MMIO Stale Data vulnerabilities can be used to +extract RDRAND, RDSEED, and EGETKEY data. + +Do not disable SRBDS mitigation by default when CPU is also affected by +Processor MMIO Stale Data vulnerabilities. + +Signed-off-by: Pawan Gupta +Signed-off-by: Borislav Petkov +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -586,11 +586,13 @@ static void __init srbds_select_mitigati + return; + + /* +- * Check to see if this is one of the MDS_NO systems supporting +- * TSX that are only exposed to SRBDS when TSX is enabled. ++ * Check to see if this is one of the MDS_NO systems supporting TSX that ++ * are only exposed to SRBDS when TSX is enabled or when CPU is affected ++ * by Processor MMIO Stale Data vulnerability. + */ + ia32_cap = x86_read_arch_cap_msr(); +- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) ++ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && ++ !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; + else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; -- 2.47.3
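
The series above documents the new mitigation state in
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst and exposes it
through /sys/devices/system/cpu/vulnerabilities/mmio_stale_data. The small
userspace sketch below is only an illustration of reading that interface and is
not part of the queued patches; the sysfs path comes from the patches above,
while the program itself, its file name and its output format are assumptions
made for the example.

/*
 * mmio_stale_data_check.c - illustrative userspace check, not part of the
 * queued series: read the sysfs file added by
 * x86-speculation-mmio-add-sysfs-reporting-for-processor-mmio-stale-data.patch
 * and print the reported vulnerability/mitigation state.
 *
 * Build: cc -Wall -o mmio_stale_data_check mmio_stale_data_check.c
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define VULN_FILE "/sys/devices/system/cpu/vulnerabilities/mmio_stale_data"

int main(void)
{
        char buf[256];
        FILE *f = fopen(VULN_FILE, "r");

        if (!f) {
                /* An absent file usually means the kernel predates this series. */
                fprintf(stderr, "cannot open %s: %s\n", VULN_FILE, strerror(errno));
                return 1;
        }

        if (!fgets(buf, sizeof(buf), f)) {
                fprintf(stderr, "cannot read %s\n", VULN_FILE);
                fclose(f);
                return 1;
        }
        fclose(f);

        /*
         * The string is one of the values listed in the admin guide, e.g.
         * "Not affected", "Vulnerable", or "Mitigation: Clear CPU buffers"
         * with an "; SMT ..." suffix on affected systems.
         */
        printf("mmio_stale_data: %s", buf);
        return 0;
}

On a kernel with these patches applied, the program prints one of the strings
described in the admin guide, for example "Mitigation: Clear CPU buffers; SMT
vulnerable".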