From c4625b0b2dc15875d5f1adeb3153223de79599d3 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Tue, 14 Feb 2023 12:48:27 -0500 Subject: [PATCH] Fixes for 6.1 Signed-off-by: Sasha Levin --- ...-vuln-add-documentation-for-cross-th.patch | 132 ++++++++++++++++++ ...-the-cross-thread-return-address-pre.patch | 108 ++++++++++++++ queue-6.1/series | 3 + ...identify-processors-vulnerable-to-sm.patch | 79 +++++++++++ 4 files changed, 322 insertions(+) create mode 100644 queue-6.1/documentation-hw-vuln-add-documentation-for-cross-th.patch create mode 100644 queue-6.1/kvm-x86-mitigate-the-cross-thread-return-address-pre.patch create mode 100644 queue-6.1/x86-speculation-identify-processors-vulnerable-to-sm.patch diff --git a/queue-6.1/documentation-hw-vuln-add-documentation-for-cross-th.patch b/queue-6.1/documentation-hw-vuln-add-documentation-for-cross-th.patch new file mode 100644 index 00000000000..637db39f320 --- /dev/null +++ b/queue-6.1/documentation-hw-vuln-add-documentation-for-cross-th.patch @@ -0,0 +1,132 @@ +From 240dca8e9266ef15c517b21f15105bc4eb77657c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 9 Feb 2023 09:22:26 -0600 +Subject: Documentation/hw-vuln: Add documentation for Cross-Thread Return + Predictions + +From: Tom Lendacky + +[ Upstream commit 493a2c2d23ca91afba96ac32b6cbafb54382c2a3 ] + +Add the admin guide for the Cross-Thread Return Predictions vulnerability. + +Signed-off-by: Tom Lendacky +Message-Id: <60f9c0b4396956ce70499ae180cb548720b25c7e.1675956146.git.thomas.lendacky@amd.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + .../admin-guide/hw-vuln/cross-thread-rsb.rst | 92 +++++++++++++++++++ + Documentation/admin-guide/hw-vuln/index.rst | 1 + + 2 files changed, 93 insertions(+) + create mode 100644 Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst + +diff --git a/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst +new file mode 100644 +index 0000000000000..ec6e9f5bcf9e8 +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst +@@ -0,0 +1,92 @@ ++ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++Cross-Thread Return Address Predictions ++======================================= ++ ++Certain AMD and Hygon processors are subject to a cross-thread return address ++predictions vulnerability. When running in SMT mode and one sibling thread ++transitions out of C0 state, the other sibling thread could use return target ++predictions from the sibling thread that transitioned out of C0. ++ ++The Spectre v2 mitigations protect the Linux kernel, as it fills the return ++address prediction entries with safe targets when context switching to the idle ++thread. However, KVM does allow a VMM to prevent exiting guest mode when ++transitioning out of C0. This could result in a guest-controlled return target ++being consumed by the sibling thread. ++ ++Affected processors ++------------------- ++ ++The following CPUs are vulnerable: ++ ++ - AMD Family 17h processors ++ - Hygon Family 18h processors ++ ++Related CVEs ++------------ ++ ++The following CVE entry is related to this issue: ++ ++ ============== ======================================= ++ CVE-2022-27672 Cross-Thread Return Address Predictions ++ ============== ======================================= ++ ++Problem ++------- ++ ++Affected SMT-capable processors support 1T and 2T modes of execution when SMT ++is enabled. In 2T mode, both threads in a core are executing code. 
For the ++processor core to enter 1T mode, it is required that one of the threads ++requests to transition out of the C0 state. This can be communicated with the ++HLT instruction or with an MWAIT instruction that requests non-C0. ++When the thread re-enters the C0 state, the processor transitions back ++to 2T mode, assuming the other thread is also still in C0 state. ++ ++In affected processors, the return address predictor (RAP) is partitioned ++depending on the SMT mode. For instance, in 2T mode each thread uses a private ++16-entry RAP, but in 1T mode, the active thread uses a 32-entry RAP. Upon ++transition between 1T/2T mode, the RAP contents are not modified but the RAP ++pointers (which control the next return target to use for predictions) may ++change. This behavior may result in return targets from one SMT thread being ++used by RET predictions in the sibling thread following a 1T/2T switch. In ++particular, a RET instruction executed immediately after a transition to 1T may ++use a return target from the thread that just became idle. In theory, this ++could lead to information disclosure if the return targets used do not come ++from trustworthy code. ++ ++Attack scenarios ++---------------- ++ ++An attack can be mounted on affected processors by performing a series of CALL ++instructions with targeted return locations and then transitioning out of C0 ++state. ++ ++Mitigation mechanism ++-------------------- ++ ++Before entering idle state, the kernel context switches to the idle thread. The ++context switch fills the RAP entries (referred to as the RSB in Linux) with safe ++targets by performing a sequence of CALL instructions. ++ ++Prevent a guest VM from directly putting the processor into an idle state by ++intercepting HLT and MWAIT instructions. ++ ++Both mitigations are required to fully address this issue. ++ ++Mitigation control on the kernel command line ++--------------------------------------------- ++ ++Use existing Spectre v2 mitigations that will fill the RSB on context switch. ++ ++Mitigation control for KVM - module parameter ++--------------------------------------------- ++ ++By default, the KVM hypervisor mitigates this issue by intercepting guest ++attempts to transition out of C0. A VMM can use the KVM_CAP_X86_DISABLE_EXITS ++capability to override those interceptions, but since this is not common, the ++mitigation that covers this path is not enabled by default. ++ ++The mitigation for the KVM_CAP_X86_DISABLE_EXITS capability can be turned on ++using the boolean module parameter mitigate_smt_rsb, e.g.: ++ kvm.mitigate_smt_rsb=1 +diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst +index 4df436e7c4177..e0614760a99e7 100644 +--- a/Documentation/admin-guide/hw-vuln/index.rst ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -18,3 +18,4 @@ are configurable at compile, boot or run time. 
+ core-scheduling.rst + l1d_flush.rst + processor_mmio_stale_data.rst ++ cross-thread-rsb.rst +-- +2.39.0 + diff --git a/queue-6.1/kvm-x86-mitigate-the-cross-thread-return-address-pre.patch b/queue-6.1/kvm-x86-mitigate-the-cross-thread-return-address-pre.patch new file mode 100644 index 00000000000..f61a20ac3aa --- /dev/null +++ b/queue-6.1/kvm-x86-mitigate-the-cross-thread-return-address-pre.patch @@ -0,0 +1,108 @@ +From f45827d4789946086353df298ed5f0610feb7d2d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 9 Feb 2023 09:22:25 -0600 +Subject: KVM: x86: Mitigate the cross-thread return address predictions bug + +From: Tom Lendacky + +[ Upstream commit 6f0f2d5ef895d66a3f2b32dd05189ec34afa5a55 ] + +By default, KVM/SVM will intercept attempts by the guest to transition +out of C0. However, the KVM_CAP_X86_DISABLE_EXITS capability can be used +by a VMM to change this behavior. To mitigate the cross-thread return +address predictions bug (X86_BUG_SMT_RSB), a VMM must not be allowed to +override the default behavior to intercept C0 transitions. + +Use a module parameter to control the mitigation on processors that are +vulnerable to X86_BUG_SMT_RSB. If the processor is vulnerable to the +X86_BUG_SMT_RSB bug and the module parameter is set to mitigate the bug, +KVM will not allow the disabling of the HLT, MWAIT and CSTATE exits. + +Signed-off-by: Tom Lendacky +Message-Id: <4019348b5e07148eb4d593380a5f6713b93c9a16.1675956146.git.thomas.lendacky@amd.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/x86.c | 43 ++++++++++++++++++++++++++++++++----------- + 1 file changed, 32 insertions(+), 11 deletions(-) + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 69227f77b201d..a0c35b948c30b 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -192,6 +192,10 @@ module_param(enable_pmu, bool, 0444); + bool __read_mostly eager_page_split = true; + module_param(eager_page_split, bool, 0644); + ++/* Enable/disable SMT_RSB bug mitigation */ ++bool __read_mostly mitigate_smt_rsb; ++module_param(mitigate_smt_rsb, bool, 0444); ++ + /* + * Restoring the host value for MSRs that are only consumed when running in + * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU +@@ -4435,10 +4439,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) + r = KVM_CLOCK_VALID_FLAGS; + break; + case KVM_CAP_X86_DISABLE_EXITS: +- r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | +- KVM_X86_DISABLE_EXITS_CSTATE; +- if(kvm_can_mwait_in_guest()) +- r |= KVM_X86_DISABLE_EXITS_MWAIT; ++ r = KVM_X86_DISABLE_EXITS_PAUSE; ++ ++ if (!mitigate_smt_rsb) { ++ r |= KVM_X86_DISABLE_EXITS_HLT | ++ KVM_X86_DISABLE_EXITS_CSTATE; ++ ++ if (kvm_can_mwait_in_guest()) ++ r |= KVM_X86_DISABLE_EXITS_MWAIT; ++ } + break; + case KVM_CAP_X86_SMM: + /* SMBASE is usually relocated above 1M on modern chipsets, +@@ -6214,15 +6223,26 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, + if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) + break; + +- if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && +- kvm_can_mwait_in_guest()) +- kvm->arch.mwait_in_guest = true; +- if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) +- kvm->arch.hlt_in_guest = true; + if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) + kvm->arch.pause_in_guest = true; +- if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) +- kvm->arch.cstate_in_guest = true; ++ ++#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. 
" \ ++ "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests." ++ ++ if (!mitigate_smt_rsb) { ++ if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() && ++ (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE)) ++ pr_warn_once(SMT_RSB_MSG); ++ ++ if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && ++ kvm_can_mwait_in_guest()) ++ kvm->arch.mwait_in_guest = true; ++ if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) ++ kvm->arch.hlt_in_guest = true; ++ if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) ++ kvm->arch.cstate_in_guest = true; ++ } ++ + r = 0; + break; + case KVM_CAP_MSR_PLATFORM_INFO: +@@ -13730,6 +13750,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); + static int __init kvm_x86_init(void) + { + kvm_mmu_x86_module_init(); ++ mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible(); + return 0; + } + module_init(kvm_x86_init); +-- +2.39.0 + diff --git a/queue-6.1/series b/queue-6.1/series index b105a89a619..a20c0f266aa 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -112,3 +112,6 @@ drm-amd-display-fix-cursor-offset-on-rotation-180.patch drm-i915-move-fd_install-after-last-use-of-fence.patch drm-i915-initialize-the-obj-flags-for-shmem-objects.patch drm-i915-fix-vbt-dsi-dvo-port-handling.patch +x86-speculation-identify-processors-vulnerable-to-sm.patch +kvm-x86-mitigate-the-cross-thread-return-address-pre.patch +documentation-hw-vuln-add-documentation-for-cross-th.patch diff --git a/queue-6.1/x86-speculation-identify-processors-vulnerable-to-sm.patch b/queue-6.1/x86-speculation-identify-processors-vulnerable-to-sm.patch new file mode 100644 index 00000000000..6db9ce4f58b --- /dev/null +++ b/queue-6.1/x86-speculation-identify-processors-vulnerable-to-sm.patch @@ -0,0 +1,79 @@ +From 7e07cf84d679ffc46e93ebbc39732453c67e1bcd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 9 Feb 2023 09:22:24 -0600 +Subject: x86/speculation: Identify processors vulnerable to SMT RSB + predictions + +From: Tom Lendacky + +[ Upstream commit be8de49bea505e7777a69ef63d60e02ac1712683 ] + +Certain AMD processors are vulnerable to a cross-thread return address +predictions bug. When running in SMT mode and one of the sibling threads +transitions out of C0 state, the other sibling thread could use return +target predictions from the sibling thread that transitioned out of C0. + +The Spectre v2 mitigations cover the Linux kernel, as it fills the RSB +when context switching to the idle thread. However, KVM allows a VMM to +prevent exiting guest mode when transitioning out of C0. A guest could +act maliciously in this situation, so create a new x86 BUG that can be +used to detect if the processor is vulnerable. 
+ +Reviewed-by: Borislav Petkov (AMD) +Signed-off-by: Tom Lendacky +Message-Id: <91cec885656ca1fcd4f0185ce403a53dd9edecb7.1675956146.git.thomas.lendacky@amd.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + arch/x86/include/asm/cpufeatures.h | 1 + + arch/x86/kernel/cpu/common.c | 9 +++++++-- + 2 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index b2da7cb64b317..92729c38853d1 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -463,5 +463,6 @@ + #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ + #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ + #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ ++#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 3e508f2390983..e80572b674b7a 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1235,6 +1235,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + #define MMIO_SBDS BIT(2) + /* CPU is affected by RETbleed, speculating where you would not expect it */ + #define RETBLEED BIT(3) ++/* CPU is affected by SMT (cross-thread) return predictions */ ++#define SMT_RSB BIT(4) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +@@ -1266,8 +1268,8 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + + VULNBL_AMD(0x15, RETBLEED), + VULNBL_AMD(0x16, RETBLEED), +- VULNBL_AMD(0x17, RETBLEED), +- VULNBL_HYGON(0x18, RETBLEED), ++ VULNBL_AMD(0x17, RETBLEED | SMT_RSB), ++ VULNBL_HYGON(0x18, RETBLEED | SMT_RSB), + {} + }; + +@@ -1385,6 +1387,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + !(ia32_cap & ARCH_CAP_PBRSB_NO)) + setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); + ++ if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) ++ setup_force_cpu_bug(X86_BUG_SMT_RSB); ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +-- +2.39.0 + -- 2.47.2
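
Note appended in review, not part of the series: a minimal userspace sketch
for sanity-checking this mitigation on a given host. It relies only on the
long-standing uapi bits from <linux/kvm.h> (KVM_CHECK_EXTENSION,
KVM_CAP_X86_DISABLE_EXITS and the KVM_X86_DISABLE_EXITS_* flags), plus two
assumptions worth flagging: that the X86_BUG_SMT_RSB bit added above surfaces
as "smt_rsb" in the "bugs" line of /proc/cpuinfo, and that the 0444 module
parameter is visible at /sys/module/kvm/parameters/mitigate_smt_rsb. With
kvm.mitigate_smt_rsb=1 on an affected part, the mask returned below should
contain only KVM_X86_DISABLE_EXITS_PAUSE; per the quoted x86.c change, a
later KVM_ENABLE_CAP asking for the other bits still returns 0 but they are
silently not honored (with a one-time warning if the CPU is affected).

/* Sketch: report SMT_RSB exposure and which exits KVM lets a VMM disable. */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Assumes X86_BUG_SMT_RSB is reported as "smt_rsb" in the cpuinfo bugs line. */
static int cpu_has_smt_rsb_bug(void)
{
        FILE *f = fopen("/proc/cpuinfo", "r");
        char line[4096];
        int found = 0;

        if (!f)
                return -1;
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "bugs", 4) && strstr(line, "smt_rsb")) {
                        found = 1;
                        break;
                }
        }
        fclose(f);
        return found;
}

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int exits;

        if (kvm < 0) {
                perror("open /dev/kvm");
                return 1;
        }

        /* Bitmask of exits a VMM may disable via KVM_CAP_X86_DISABLE_EXITS. */
        exits = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_DISABLE_EXITS);

        printf("smt_rsb advertised in /proc/cpuinfo: %d\n", cpu_has_smt_rsb_bug());
        printf("PAUSE exit may be disabled:  %d\n", !!(exits & KVM_X86_DISABLE_EXITS_PAUSE));
        printf("HLT exit may be disabled:    %d\n", !!(exits & KVM_X86_DISABLE_EXITS_HLT));
        printf("MWAIT exit may be disabled:  %d\n", !!(exits & KVM_X86_DISABLE_EXITS_MWAIT));
        printf("CSTATE exit may be disabled: %d\n", !!(exits & KVM_X86_DISABLE_EXITS_CSTATE));

        close(kvm);
        return 0;
}

On an unmitigated host all four bits are typically set; on an affected host
booted with kvm.mitigate_smt_rsb=1, everything but PAUSE should read 0.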