From foo@baz Thu Dec 13 20:11:30 CET 2018
From: Tom Lendacky <thomas.lendacky@amd.com>
Date: Thu, 10 May 2018 22:06:39 +0200
Subject: KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD

From: Tom Lendacky <thomas.lendacky@amd.com>

commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream.

Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM. This will allow guests
to use SSBD on hardware that uses non-architectural mechanisms for enabling
SSBD.

[ tglx: Folded the migration fixup from Paolo Bonzini ]

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/kvm_host.h |    2 +-
 arch/x86/kernel/cpu/common.c    |    3 ++-
 arch/x86/kvm/cpuid.c            |   11 +++++++++--
 arch/x86/kvm/cpuid.h            |    9 +++++++++
 arch/x86/kvm/svm.c              |   21 +++++++++++++++++++--
 arch/x86/kvm/vmx.c              |   18 +++++++++++++++---
 arch/x86/kvm/x86.c              |   13 ++++---------
 7 files changed, 59 insertions(+), 18 deletions(-)

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -765,7 +765,7 @@ struct kvm_x86_ops {
         int (*hardware_setup)(void);               /* __init */
         void (*hardware_unsetup)(void);            /* __exit */
         bool (*cpu_has_accelerated_tpr)(void);
-        bool (*cpu_has_high_real_mode_segbase)(void);
+        bool (*has_emulated_msr)(int index);
         void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
         /* Create, but do not attach this VCPU */
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -693,7 +693,8 @@ static void init_speculation_control(str
         if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
                 set_cpu_cap(c, X86_FEATURE_STIBP);
 
-        if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+        if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+            cpu_has(c, X86_FEATURE_VIRT_SSBD))
                 set_cpu_cap(c, X86_FEATURE_SSBD);
 
         if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -343,7 +343,7 @@ static inline int __do_cpuid_ent(struct
 
         /* cpuid 0x80000008.ebx */
         const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-                F(AMD_IBPB) | F(AMD_IBRS);
+                F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
 
         /* cpuid 0xC0000001.edx */
         const u32 kvm_supported_word5_x86_features =
@@ -595,13 +595,20 @@ static inline int __do_cpuid_ent(struct
                         g_phys_as = phys_as;
                 entry->eax = g_phys_as | (virt_as << 8);
                 entry->edx = 0;
-                /* IBRS and IBPB aren't necessarily present in hardware cpuid */
+                /*
+                 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+                 * hardware cpuid
+                 */
                 if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
                         entry->ebx |= F(AMD_IBPB);
                 if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
                         entry->ebx |= F(AMD_IBRS);
+                if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+                        entry->ebx |= F(VIRT_SSBD);
                 entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
                 cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+                if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+                        entry->ebx |= F(VIRT_SSBD);
                 break;
         }
         case 0x80000019:
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -189,6 +189,15 @@ static inline bool guest_cpuid_has_arch_
         return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
 }
 
+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
+{
+        struct kvm_cpuid_entry2 *best;
+
+        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+        return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
+}
+
+
 
 /*
  * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3102,6 +3102,13 @@ static int svm_get_msr(struct kvm_vcpu *
 
                 msr_info->data = svm->spec_ctrl;
                 break;
+        case MSR_AMD64_VIRT_SPEC_CTRL:
+                if (!msr_info->host_initiated &&
+                    !guest_cpuid_has_virt_ssbd(vcpu))
+                        return 1;
+
+                msr_info->data = svm->virt_spec_ctrl;
+                break;
         case MSR_IA32_UCODE_REV:
                 msr_info->data = 0x01000065;
                 break;
@@ -3219,6 +3226,16 @@ static int svm_set_msr(struct kvm_vcpu *
                         break;
                 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
                 break;
+        case MSR_AMD64_VIRT_SPEC_CTRL:
+                if (!msr->host_initiated &&
+                    !guest_cpuid_has_virt_ssbd(vcpu))
+                        return 1;
+
+                if (data & ~SPEC_CTRL_SSBD)
+                        return 1;
+
+                svm->virt_spec_ctrl = data;
+                break;
         case MSR_STAR:
                 svm->vmcb->save.star = data;
                 break;
@@ -4137,7 +4154,7 @@ static bool svm_cpu_has_accelerated_tpr(
         return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
         return true;
 }
@@ -4421,7 +4438,7 @@ static struct kvm_x86_ops svm_x86_ops =
         .hardware_enable = svm_hardware_enable,
         .hardware_disable = svm_hardware_disable,
         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-        .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+        .has_emulated_msr = svm_has_emulated_msr,
 
         .vcpu_create = svm_create_vcpu,
         .vcpu_free = svm_free_vcpu,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8458,9 +8458,21 @@ static void vmx_handle_external_intr(str
         local_irq_enable();
 }
 
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-        return enable_unrestricted_guest || emulate_invalid_guest_state;
+        switch (index) {
+        case MSR_IA32_SMBASE:
+                /*
+                 * We cannot do SMM unless we can run the guest in big
+                 * real mode.
+                 */
+                return enable_unrestricted_guest || emulate_invalid_guest_state;
+        case MSR_AMD64_VIRT_SPEC_CTRL:
+                /* This is AMD only. */
+                return false;
+        default:
+                return true;
+        }
 }
 
 static bool vmx_mpx_supported(void)
@@ -10952,7 +10964,7 @@ static struct kvm_x86_ops vmx_x86_ops =
         .hardware_enable = hardware_enable,
         .hardware_disable = hardware_disable,
         .cpu_has_accelerated_tpr = report_flexpriority,
-        .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+        .has_emulated_msr = vmx_has_emulated_msr,
 
         .vcpu_create = vmx_create_vcpu,
         .vcpu_free = vmx_free_vcpu,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -985,6 +985,7 @@ static u32 emulated_msrs[] = {
         MSR_IA32_MCG_STATUS,
         MSR_IA32_MCG_CTL,
         MSR_IA32_SMBASE,
+        MSR_AMD64_VIRT_SPEC_CTRL,
 };
 
 static unsigned num_emulated_msrs;
@@ -2584,7 +2585,7 @@ int kvm_vm_ioctl_check_extension(struct
                  * fringe case that is not enabled except via specific settings
                  * of the module parameters.
                  */
-                r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+                r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
                 break;
         case KVM_CAP_COALESCED_MMIO:
                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
@@ -4073,14 +4074,8 @@ static void kvm_init_msr_list(void)
         num_msrs_to_save = j;
 
         for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-                switch (emulated_msrs[i]) {
-                case MSR_IA32_SMBASE:
-                        if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-                                continue;
-                        break;
-                default:
-                        break;
-                }
+                if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+                        continue;
 
                 if (j < i)
                         emulated_msrs[j] = emulated_msrs[i];
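
For context, the guest-visible interface this backport exposes is CPUID Fn8000_0008 EBX bit 25 (VIRT_SSBD) together with MSR 0xc001011f (MSR_AMD64_VIRT_SPEC_CTRL), in which KVM accepts only bit 2 (SPEC_CTRL_SSBD). The sketch below is illustrative only and not part of the patch: a minimal user-space program that a Linux guest could run (assuming the "msr" driver is loaded and root privileges) to check the CPUID bit and set SSBD through the virtualized MSR.

/*
 * Illustrative only (not part of the patch): exercise the VIRT_SSBD
 * interface from inside a guest.  Assumes a Linux guest with the "msr"
 * driver loaded (/dev/cpu/0/msr) and root privileges.
 */
#include <cpuid.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_VIRT_SPEC_CTRL  0xc001011f   /* virtualized SPEC_CTRL MSR */
#define SPEC_CTRL_SSBD            (1ULL << 2)  /* speculative store bypass disable */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        uint64_t val;
        int fd;

        /* CPUID Fn8000_0008 EBX bit 25 advertises VIRT_SSBD to the guest. */
        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) ||
            !(ebx & (1u << 25))) {
                fprintf(stderr, "VIRT_SSBD not enumerated\n");
                return 1;
        }

        fd = open("/dev/cpu/0/msr", O_RDWR);
        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }

        /* Read-modify-write the virtualized SPEC_CTRL MSR to enable SSBD. */
        if (pread(fd, &val, sizeof(val), MSR_AMD64_VIRT_SPEC_CTRL) != sizeof(val)) {
                perror("rdmsr MSR_AMD64_VIRT_SPEC_CTRL");
                return 1;
        }
        val |= SPEC_CTRL_SSBD;

        /* svm_set_msr() rejects any bit other than SPEC_CTRL_SSBD (guest sees #GP). */
        if (pwrite(fd, &val, sizeof(val), MSR_AMD64_VIRT_SPEC_CTRL) != sizeof(val)) {
                perror("wrmsr MSR_AMD64_VIRT_SPEC_CTRL");
                return 1;
        }

        printf("SSBD enabled via VIRT_SPEC_CTRL, value now 0x%llx\n",
               (unsigned long long)val);
        close(fd);
        return 0;
}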