Commit 6fa88700 (Greg Kroah-Hartman) | Line | Data |
---|---|---|
1 | From 58840bc9485d12a77ac5421ba3e8069c4d795d92 Mon Sep 17 00:00:00 2001 |
2 | From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | |
3 | Date: Fri, 1 Jun 2018 10:59:20 -0400 | |
4 | Subject: [PATCH 04/76] x86/bugs: Add AMD's SPEC_CTRL MSR usage | |
5 | MIME-Version: 1.0 | |
6 | Content-Type: text/plain; charset=UTF-8 | |
7 | Content-Transfer-Encoding: 8bit | |
8 | ||
9 | commit 6ac2f49edb1ef5446089c7c660017732886d62d6 upstream. | |
10 | ||
11 | The AMD document outlining the SSBD handling | |
12 | 124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf | |
13 | mentions that if CPUID 8000_0008.EBX[24] is set we should be using | |
14 | the SPEC_CTRL MSR (0x48) over the VIRT SPEC_CTRL MSR (0xC001_011f) | |
15 | for speculative store bypass disable. | |
16 | ||
17 | This in effect means we should clear the X86_FEATURE_VIRT_SSBD | |
18 | flag so that we would prefer the SPEC_CTRL MSR. | |
19 | ||
20 | See the document titled: | |
21 | 124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf | |
22 | ||
23 | A copy of this document is available at | |
24 | https://bugzilla.kernel.org/show_bug.cgi?id=199889 | |
25 | ||
26 | Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | |
27 | Signed-off-by: Thomas Gleixner <tglx@linutronix.de> | |
28 | Cc: Tom Lendacky <thomas.lendacky@amd.com> | |
29 | Cc: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com> | |
30 | Cc: kvm@vger.kernel.org | |
31 | Cc: KarimAllah Ahmed <karahmed@amazon.de> | |
32 | Cc: andrew.cooper3@citrix.com | |
33 | Cc: Joerg Roedel <joro@8bytes.org> | |
34 | Cc: Radim Krčmář <rkrcmar@redhat.com> | |
35 | Cc: Andy Lutomirski <luto@kernel.org> | |
36 | Cc: "H. Peter Anvin" <hpa@zytor.com> | |
37 | Cc: Paolo Bonzini <pbonzini@redhat.com> | |
38 | Cc: Borislav Petkov <bp@suse.de> | |
39 | Cc: David Woodhouse <dwmw@amazon.co.uk> | |
40 | Cc: Kees Cook <keescook@chromium.org> | |
41 | Link: https://lkml.kernel.org/r/20180601145921.9500-3-konrad.wilk@oracle.com | |
42 | [bwh: Backported to 4.9: | |
43 | - Update feature test in guest_cpuid_has_spec_ctrl() instead of | |
44 | svm_{get,set}_msr() | |
45 | - Adjust context, indentation] | |
46 | Signed-off-by: Ben Hutchings <ben@decadent.org.uk> | |
47 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
48 | --- | |
49 | arch/x86/include/asm/cpufeatures.h | 1 + | |
50 | arch/x86/kernel/cpu/bugs.c | 12 +++++++----- | |
51 | arch/x86/kernel/cpu/common.c | 6 ++++++ | |
52 | arch/x86/kvm/cpuid.c | 10 ++++++++-- | |
53 | arch/x86/kvm/cpuid.h | 2 +- | |
54 | arch/x86/kvm/svm.c | 2 +- | |
55 | 6 files changed, 24 insertions(+), 9 deletions(-) | |
56 | ||
57 | diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h | |
58 | index 453ab6f3bca0..234d74186046 100644 | |
59 | --- a/arch/x86/include/asm/cpufeatures.h | |
60 | +++ b/arch/x86/include/asm/cpufeatures.h | |
61 | @@ -274,6 +274,7 @@ | |
62 | #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ | |
63 | #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ | |
64 | #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ | |
65 | +#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ | |
66 | #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ | |
67 | #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ | |
68 | ||
69 | diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c | |
70 | index 6221166e3fca..b1146405ce8a 100644 | |
71 | --- a/arch/x86/kernel/cpu/bugs.c | |
72 | +++ b/arch/x86/kernel/cpu/bugs.c | |
73 | @@ -531,18 +531,20 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) | |
74 | if (mode == SPEC_STORE_BYPASS_DISABLE) { | |
75 | setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); | |
76 | /* | |
77 | - * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses | |
78 | - * a completely different MSR and bit dependent on family. | |
79 | + * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may | |
80 | + * use a completely different MSR and bit dependent on family. | |
81 | */ | |
82 | switch (boot_cpu_data.x86_vendor) { | |
83 | case X86_VENDOR_INTEL: | |
84 | + case X86_VENDOR_AMD: | |
85 | + if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { | |
86 | + x86_amd_ssb_disable(); | |
87 | + break; | |
88 | + } | |
89 | x86_spec_ctrl_base |= SPEC_CTRL_SSBD; | |
90 | x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; | |
91 | wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); | |
92 | break; | |
93 | - case X86_VENDOR_AMD: | |
94 | - x86_amd_ssb_disable(); | |
95 | - break; | |
96 | } | |
97 | } | |
98 | ||
99 | diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c | |
100 | index 9b8e912fd840..59b2dc011f7f 100644 | |
101 | --- a/arch/x86/kernel/cpu/common.c | |
102 | +++ b/arch/x86/kernel/cpu/common.c | |
103 | @@ -752,6 +752,12 @@ static void init_speculation_control(struct cpuinfo_x86 *c) | |
104 | set_cpu_cap(c, X86_FEATURE_STIBP); | |
105 | set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); | |
106 | } | |
107 | + | |
108 | + if (cpu_has(c, X86_FEATURE_AMD_SSBD)) { | |
109 | + set_cpu_cap(c, X86_FEATURE_SSBD); | |
110 | + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); | |
111 | + clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD); | |
112 | + } | |
113 | } | |
114 | ||
115 | void get_cpu_cap(struct cpuinfo_x86 *c) | |
116 | diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c | |
117 | index f1f1a128bbdb..b6435f3be254 100644 | |
118 | --- a/arch/x86/kvm/cpuid.c | |
119 | +++ b/arch/x86/kvm/cpuid.c | |
120 | @@ -355,7 +355,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |
121 | ||
122 | /* cpuid 0x80000008.ebx */ | |
123 | const u32 kvm_cpuid_8000_0008_ebx_x86_features = | |
124 | - F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD) | F(AMD_SSB_NO); | |
125 | + F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | | |
126 | + F(AMD_SSB_NO); | |
127 | ||
128 | /* cpuid 0xC0000001.edx */ | |
129 | const u32 kvm_cpuid_C000_0001_edx_x86_features = | |
130 | @@ -633,7 +634,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |
131 | entry->ebx |= F(VIRT_SSBD); | |
132 | entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; | |
133 | cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); | |
134 | - if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) | |
135 | + /* | |
136 | + * The preference is to use SPEC CTRL MSR instead of the | |
137 | + * VIRT_SPEC MSR. | |
138 | + */ | |
139 | + if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) && | |
140 | + !boot_cpu_has(X86_FEATURE_AMD_SSBD)) | |
141 | entry->ebx |= F(VIRT_SSBD); | |
142 | break; | |
143 | } | |
144 | diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h | |
145 | index 8a841b9d8f84..b2bf8e1d5782 100644 | |
146 | --- a/arch/x86/kvm/cpuid.h | |
147 | +++ b/arch/x86/kvm/cpuid.h | |
148 | @@ -176,7 +176,7 @@ static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu) | |
149 | struct kvm_cpuid_entry2 *best; | |
150 | ||
151 | best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); | |
152 | - if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS))) | |
153 | + if (best && (best->ebx & (bit(X86_FEATURE_AMD_IBRS | bit(X86_FEATURE_AMD_SSBD))))) | |
154 | return true; | |
155 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | |
156 | return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD))); | |
157 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c | |
158 | index 9a6d258c3c16..9338136a6a23 100644 | |
159 | --- a/arch/x86/kvm/svm.c | |
160 | +++ b/arch/x86/kvm/svm.c | |
161 | @@ -3704,7 +3704,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |
162 | return 1; | |
163 | ||
164 | /* The STIBP bit doesn't fault even if it's not advertised */ | |
165 | - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) | |
166 | + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) | |
167 | return 1; | |
168 | ||
169 | svm->spec_ctrl = data; | |
170 | -- | |
171 | 2.21.0 | |
172 |