#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD  0xffffff0000000000ULL
#define TSC_RATIO_MIN   0x0000000000000001ULL
#define TSC_RATIO_MAX   0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT       0x0100000000ULL

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
        { .index = MSR_STAR,                    .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,        .always = true  },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                 .always = true  },
        { .index = MSR_FS_BASE,                 .always = true  },
        { .index = MSR_KERNEL_GS_BASE,          .always = true  },
        { .index = MSR_LSTAR,                   .always = true  },
        { .index = MSR_CSTAR,                   .always = true  },
        { .index = MSR_SYSCALL_MASK,            .always = true  },
#endif
        { .index = MSR_IA32_SPEC_CTRL,          .always = false },
        { .index = MSR_IA32_PRED_CMD,           .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,   .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,     .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,      .always = false },
        { .index = MSR_IA32_LASTINTTOIP,        .always = false },
        { .index = MSR_INVALID,                 .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif

/*
 * These two module parameters configure the controls for Pause-Loop Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering (indicated
 *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *      count value. On VMRUN this value is loaded into an internal counter.
 *      Each time a PAUSE instruction is executed, this counter is decremented
 *      until it reaches zero, at which time a #VMEXIT is generated if pause
 *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *      Intercept Filtering for more details.
 *      This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *      upper bound on the amount of time a guest is allowed to execute in a
 *      pause loop. In this mode, a 16-bit pause filter threshold field is
 *      added to the VMCB. The threshold value is a cycle count that is used
 *      to reset the pause counter. As with simple pause filtering, VMRUN
 *      loads the pause count value from the VMCB into an internal counter.
 *      Then, on each PAUSE instruction the hardware checks the number of
 *      cycles elapsed since the most recent PAUSE instruction against the
 *      pause filter threshold. If the elapsed cycle count is greater than
 *      the pause filter threshold, then the internal pause count is reloaded
 *      from the VMCB and execution continues. If the elapsed cycle count is
 *      less than the pause filter threshold, then the internal pause count
 *      is decremented. If the count value is less than zero and PAUSE
 *      intercept is enabled, a #VMEXIT is triggered. If advanced pause
 *      filtering is supported and the pause filter threshold field is set
 *      to zero, the filter will operate in the simpler, count-only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8    */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}
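
/*
 * Illustrative example (added comment, not in the original source): each
 * 2048-byte MSRPM range covers 2048 * 8 / 2 = 8192 MSRs, since every MSR
 * consumes two bits (one read-intercept bit, one write-intercept bit).
 * For MSR_STAR (0xc0000081), the matching range base is 0xc0000000, so
 * the byte offset is 0x81 / 4 = 32 into the second range, plus the
 * 2048-byte range offset = 2080; the u32 offset returned above is
 * 2080 / 4 = 520.
 */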

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
        asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
        asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        vcpu->arch.efer = efer;

        if (!npt_enabled) {
                /* Shadow paging assumes NX to be available.  */
                efer |= EFER_NX;

                if (!(efer & EFER_LMA))
                        efer &= ~EFER_LME;
        }

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (nrips && svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
                        return 0;
        } else {
                if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                        pr_err("%s: ip 0x%lx next 0x%llx\n",
                               __func__, kvm_rip_read(vcpu), svm->next_rip);
                kvm_rip_write(vcpu, svm->next_rip);
        }
        svm_set_interrupt_shadow(vcpu, 0);

        return 1;
}
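
/*
 * Note (added for clarity, not in the original source): when the CPU lacks
 * NRIPS, or next_rip was left at zero, the instruction length is unknown,
 * so the code above falls back to the instruction emulator with
 * EMULTYPE_SKIP, which decodes the current instruction purely to compute
 * its length and advance RIP.
 */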

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        bool reinject = vcpu->arch.exception.injected;
        u32 error_code = vcpu->arch.exception.error_code;

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the guest
         * handle the exception
         */
        if (!reinject &&
            nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        kvm_deliver_exception_payload(&svm->vcpu);

        if (nr == BP_VECTOR && !nrips) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                (void)skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}
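
/*
 * Illustrative EVENTINJ encoding (added comment, not in the original
 * source): the low byte holds the vector, bits 10:8 the event type
 * (3 == exception), bit 11 flags a valid error code and bit 31 marks the
 * whole field valid. Injecting #GP (vector 13) with an error code thus
 * yields 13 | (3 << 8) | (1 << 11) | (1u << 31) = 0x80000b0d.
 */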

static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if host processor's
         * osvw_len is 0 then osvw_status[0] carries no information. We need to
         * be conservative here and therefore we tell the guest that erratum 298
         * is present (because we really don't know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}
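
/*
 * Worked example (added for clarity, not in the original source): in the
 * OSVW scheme a status bit of 1 means "erratum present", so masking out
 * bits 1 and 2 with ~6ULL above reports errata 400 and 415 as fixed to
 * the guest, which is safe because the corresponding HLT and IO paths
 * are intercepted and handled by KVM.
 */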

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }

        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err)
                        osvw_status = osvw_len = 0;
                else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else
                osvw_status = osvw_len = 0;

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}
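
/*
 * Worked example of the OSVW merge above (added for clarity, not in the
 * original source): if CPU 0 reports len = 3, status = 0b101 and CPU 1
 * reports len = 2, status = 0b010, the global view becomes osvw_len = 2
 * and osvw_status = (0b101 | 0b010) & 0b11 = 0b11, i.e. the worst case
 * across all processors within the shortest valid length.
 */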

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
                goto free_cpu_data;

        if (svm_sev_enabled()) {
                sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
                                              sizeof(void *),
                                              GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto free_save_area;
        }

        per_cpu(svm_data, cpu) = sd;

        return 0;

free_save_area:
        __free_page(sd->save_area);
free_cpu_data:
        kfree(sd);
        return -ENOMEM;
}

static bool valid_msr_intercept(u32 index)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == index)
                        return true;

        return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        return !!test_bit(bit_write, &tmp);
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers, extend the direct_access_msrs list at
         * the beginning of the file.
         */
        WARN_ON(!valid_msr_intercept(msr));

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;
}
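
/*
 * Illustrative bit layout (added comment, not in the original source):
 * each u32 of the permission map covers 16 MSRs, two bits apiece, read
 * bit first. Continuing the MSR_STAR example, offset = 520 and
 * msr & 0x0f = 1, so the read-intercept bit is bit 2 and the
 * write-intercept bit is bit 3 of msrpm[520]; a set bit means "intercept".
 */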

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        int i;

        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;

                set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers, the msrpm_offsets table has overflowed.
         * Just increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count = __grow_ple_window(old,
                                                        pause_filter_count,
                                                        pause_filter_count_grow,
                                                        pause_filter_count_max);

        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count =
                        __shrink_ple_window(old,
                                            pause_filter_count,
                                            pause_filter_count_shrink,
                                            pause_filter_count);
        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}
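
/*
 * Example of the resulting dynamics (added for clarity, not in the
 * original source; the exact defaults live in the KVM_* macros referenced
 * by the module parameters above): with a growth factor of 2, a vCPU that
 * keeps hitting PAUSE exits has its filter count doubled on each exit, up
 * to pause_filter_count_max, so genuinely spinning guests exit less
 * often, while shrink resets the window back toward pause_filter_count
 * when the contention subsides.
 */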

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
        unsigned int enc_bit, mask_bit;
        u64 msr, mask;

        /* If there is no memory encryption support, use existing mask */
        if (cpuid_eax(0x80000000) < 0x8000001f)
                return;

        /* If memory encryption is not enabled, use existing mask */
        rdmsrl(MSR_K8_SYSCFG, msr);
        if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                return;

        enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
        mask_bit = boot_cpu_data.x86_phys_bits;

        /* Increment the mask bit if it is the same as the encryption bit */
        if (enc_bit == mask_bit)
                mask_bit++;

        /*
         * If the mask bit location is below 52, then some bits above the
         * physical addressing limit will always be reserved, so use the
         * rsvd_bits() function to generate the mask. This mask, along with
         * the present bit, will be used to generate a page fault with
         * PFER.RSV = 1.
         *
         * If the mask bit location is 52 (or above), then clear the mask.
         */
        mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}
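
/*
 * Illustrative numbers (added comment, not in the original source): with
 * the C-bit at position 47 and 48 reported physical address bits,
 * enc_bit == 47 and mask_bit == 48, so no adjustment is needed and the
 * MMIO mask becomes rsvd_bits(48, 51) | PT_PRESENT_MASK; had the two
 * positions coincided, mask_bit would be bumped by one so the mask never
 * consists of the encryption bit alone.
 */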

static void svm_hardware_teardown(void)
{
        int cpu;

        if (svm_sev_enabled())
                sev_hardware_teardown();

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
        kvm_set_cpu_caps();

        supported_xss = 0;

        /* CPUID 0x80000001 and 0x8000000A (SVM features) */
        if (nested) {
                kvm_cpu_cap_set(X86_FEATURE_SVM);

                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);

                if (npt_enabled)
                        kvm_cpu_cap_set(X86_FEATURE_NPT);
        }

        /* CPUID 0x80000008 */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
            boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        init_msrpm_offsets();

        supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                kvm_has_tsc_control = true;
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }

        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                pause_filter_count = 0;
                pause_filter_thresh = 0;
        } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
                pause_filter_thresh = 0;
        }

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }

        if (sev) {
                if (boot_cpu_has(X86_FEATURE_SEV) &&
                    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
                        r = sev_hardware_setup();
                        if (r)
                                sev = false;
                } else {
                        sev = false;
                }
        }

        svm_adjust_mmio_mask();

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        if (!boot_cpu_has(X86_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt)
                npt_enabled = false;

        kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);
        pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

        if (nrips) {
                if (!boot_cpu_has(X86_FEATURE_NRIPS))
                        nrips = false;
        }

        if (avic) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_AVIC) ||
                    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
                        avic = false;
                } else {
                        pr_info("AVIC enabled\n");

                        amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
                }
        }

        if (vls) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
                    !IS_ENABLED(CONFIG_X86_64)) {
                        vls = false;
                } else {
                        pr_info("Virtual VMLOAD VMSAVE supported\n");
                }
        }

        if (vgif) {
                if (!boot_cpu_has(X86_FEATURE_VGIF))
                        vgif = false;
                else
                        pr_info("Virtual GIF supported\n");
        }

        svm_set_cpu_caps();

        return 0;

err:
        svm_hardware_teardown();
        return r;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_guest_mode(vcpu))
                return svm->nested.hsave->control.tsc_offset;

        return vcpu->arch.tsc_offset;
}

static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 g_tsc_offset = 0;

        if (is_guest_mode(vcpu)) {
                /* Write L1's TSC offset.  */
                g_tsc_offset = svm->vmcb->control.tsc_offset -
                               svm->nested.hsave->control.tsc_offset;
                svm->nested.hsave->control.tsc_offset = offset;
        }

        trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                   svm->vmcb->control.tsc_offset - g_tsc_offset,
                                   offset);

        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
        return svm->vmcb->control.tsc_offset;
}
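
/*
 * Worked example (added for clarity, not in the original source): suppose
 * L2 is running with an active VMCB tsc_offset of 150 while L1's saved
 * (hsave) tsc_offset is 100, so the nested delta g_tsc_offset is 50.
 * Writing a new L1 offset of 200 stores 200 in hsave and programs the
 * active VMCB with 200 + 50 = 250, preserving L2's offset relative to L1.
 */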

static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        svm->vcpu.arch.hflags = 0;

        set_cr_intercept(svm, INTERCEPT_CR0_READ);
        set_cr_intercept(svm, INTERCEPT_CR3_READ);
        set_cr_intercept(svm, INTERCEPT_CR4_READ);
        set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
        set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
        set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
        if (!kvm_vcpu_apicv_active(&svm->vcpu))
                set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

        set_dr_intercepts(svm);

        set_exception_intercept(svm, PF_VECTOR);
        set_exception_intercept(svm, UD_VECTOR);
        set_exception_intercept(svm, MC_VECTOR);
        set_exception_intercept(svm, AC_VECTOR);
        set_exception_intercept(svm, DB_VECTOR);
        /*
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of TSS I/O permission bitmap.
         * We intercept those #GP and allow access to them anyway
         * as VMware does.
         */
        if (enable_vmware_backdoor)
                set_exception_intercept(svm, GP_VECTOR);

        set_intercept(svm, INTERCEPT_INTR);
        set_intercept(svm, INTERCEPT_NMI);
        set_intercept(svm, INTERCEPT_SMI);
        set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
        set_intercept(svm, INTERCEPT_RDPMC);
        set_intercept(svm, INTERCEPT_CPUID);
        set_intercept(svm, INTERCEPT_INVD);
        set_intercept(svm, INTERCEPT_INVLPG);
        set_intercept(svm, INTERCEPT_INVLPGA);
        set_intercept(svm, INTERCEPT_IOIO_PROT);
        set_intercept(svm, INTERCEPT_MSR_PROT);
        set_intercept(svm, INTERCEPT_TASK_SWITCH);
        set_intercept(svm, INTERCEPT_SHUTDOWN);
        set_intercept(svm, INTERCEPT_VMRUN);
        set_intercept(svm, INTERCEPT_VMMCALL);
        set_intercept(svm, INTERCEPT_VMLOAD);
        set_intercept(svm, INTERCEPT_VMSAVE);
        set_intercept(svm, INTERCEPT_STGI);
        set_intercept(svm, INTERCEPT_CLGI);
        set_intercept(svm, INTERCEPT_SKINIT);
        set_intercept(svm, INTERCEPT_WBINVD);
        set_intercept(svm, INTERCEPT_XSETBV);
        set_intercept(svm, INTERCEPT_RDPRU);
        set_intercept(svm, INTERCEPT_RSM);

        if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
                set_intercept(svm, INTERCEPT_MONITOR);
                set_intercept(svm, INTERCEPT_MWAIT);
        }

        if (!kvm_hlt_in_guest(svm->vcpu.kvm))
                set_intercept(svm, INTERCEPT_HLT);

        control->iopm_base_pa = __sme_set(iopm_base);
        control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        save->cs.base = 0xffff0000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                          SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        svm_set_efer(&svm->vcpu, 0);
        save->dr6 = 0xffff0ff0;
        kvm_set_rflags(&svm->vcpu, 2);
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

        /*
         * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
         * It also updates the guest-visible cr0 value.
         */
        svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
        kvm_mmu_reset_context(&svm->vcpu);

        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */

        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
                clr_intercept(svm, INTERCEPT_INVLPG);
                clr_exception_intercept(svm, PF_VECTOR);
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
                save->cr3 = 0;
                save->cr4 = 0;
        }
        svm->asid_generation = 0;

        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;

        if (pause_filter_count) {
                control->pause_filter_count = pause_filter_count;
                if (pause_filter_thresh)
                        control->pause_filter_thresh = pause_filter_thresh;
                set_intercept(svm, INTERCEPT_PAUSE);
        } else {
                clr_intercept(svm, INTERCEPT_PAUSE);
        }

        if (kvm_vcpu_apicv_active(&svm->vcpu))
                avic_init_vmcb(svm);

        /*
         * If hardware supports Virtual VMLOAD VMSAVE then enable it
         * in VMCB and clear intercepts to avoid #VMEXIT.
         */
        if (vls) {
                clr_intercept(svm, INTERCEPT_VMLOAD);
                clr_intercept(svm, INTERCEPT_VMSAVE);
                svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
        }

        if (vgif) {
                clr_intercept(svm, INTERCEPT_STGI);
                clr_intercept(svm, INTERCEPT_CLGI);
                svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
        }

        if (sev_guest(svm->vcpu.kvm)) {
                svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
                clr_exception_intercept(svm, UD_VECTOR);
        }

        mark_all_dirty(svm->vmcb);

        enable_gif(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 dummy;
        u32 eax = 1;

        svm->spec_ctrl = 0;
        svm->virt_spec_ctrl = 0;

        if (!init_event) {
                svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
                                           MSR_IA32_APICBASE_ENABLE;
                if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
                        svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
        }
        init_vmcb(svm);

        kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
        kvm_rdx_write(vcpu, eax);

        if (kvm_vcpu_apicv_active(vcpu) && !init_event)
                avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
        struct page *hsave_page;
        struct page *nested_msrpm_pages;
        int err;

        BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
        svm = to_svm(vcpu);

        err = -ENOMEM;
        page = alloc_page(GFP_KERNEL_ACCOUNT);
        if (!page)
                goto out;

        msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
                goto free_page1;

        nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
                goto free_page2;

        hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
        if (!hsave_page)
                goto free_page3;

        err = avic_init_vcpu(svm);
        if (err)
                goto free_page4;

        /*
         * We initialize this flag to true so that the is_running bit is
         * set the first time the vcpu is loaded.
         */
        if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
                svm->avic_is_running = true;

        svm->nested.hsave = page_address(hsave_page);

        svm->msrpm = page_address(msrpm_pages);
        svm_vcpu_init_msrpm(svm->msrpm);

        svm->nested.msrpm = page_address(nested_msrpm_pages);
        svm_vcpu_init_msrpm(svm->nested.msrpm);

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
        svm->asid_generation = 0;
        init_vmcb(svm);

        svm_init_osvw(vcpu);
        vcpu->arch.microcode_version = 0x01000065;

        return 0;

free_page4:
        __free_page(hsave_page);
free_page3:
        __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
        __free_page(page);
out:
        return err;
}

static void svm_clear_current_vmcb(struct vmcb *vmcb)
{
        int i;

        for_each_online_cpu(i)
                cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * The vmcb page can be recycled, causing a false negative in
         * svm_vcpu_load(). So, ensure that no logical CPU has this
         * vmcb page recorded as its current vmcb.
         */
        svm_clear_current_vmcb(svm->vmcb);

        __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                svm->asid_generation = 0;
                mark_all_dirty(svm->vmcb);
        }

#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
        savesegment(fs, svm->host.fs);
        savesegment(gs, svm->host.gs);
        svm->host.ldt = kvm_read_ldt();

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
                if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
                        __this_cpu_write(current_tsc_ratio, tsc_ratio);
                        wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
                }
        }
        /* This assumes that the kernel never uses MSR_TSC_AUX */
        if (static_cpu_has(X86_FEATURE_RDTSCP))
                wrmsrl(MSR_TSC_AUX, svm->tsc_aux);

        if (sd->current_vmcb != svm->vmcb) {
                sd->current_vmcb = svm->vmcb;
                indirect_branch_prediction_barrier();
        }
        avic_vcpu_load(vcpu, cpu);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        avic_vcpu_put(vcpu);

        ++vcpu->stat.host_state_reload;
        kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
        loadsegment(fs, svm->host.fs);
        wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
        load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
        loadsegment(gs, svm->host.gs);
#endif
#endif
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long rflags = svm->vmcb->save.rflags;

        if (svm->nmi_singlestep) {
                /* Hide our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        rflags &= ~X86_EFLAGS_RF;
        }
        return rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        if (to_svm(vcpu)->nmi_singlestep)
                rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

        /*
         * Any change of EFLAGS.VM is accompanied by a reload of SS
         * (caused by either a task switch or an inter-privilege IRET),
         * so we do not need to update the CPL here.
         */
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        switch (reg) {
        case VCPU_EXREG_PDPTR:
                BUG_ON(!npt_enabled);
                load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                break;
        default:
                WARN_ON_ONCE(1);
        }
}

static inline void svm_enable_vintr(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control;

        /* The following fields are ignored when AVIC is enabled */
        WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));

        /*
         * This is just a dummy VINTR to actually cause a vmexit to happen.
         * Actual injection of virtual interrupts happens through EVENTINJ.
         */
        control = &svm->vmcb->control;
        control->int_vector = 0x0;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
        mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
        set_intercept(svm, INTERCEPT_VINTR);
        if (is_intercept(svm, INTERCEPT_VINTR))
                svm_enable_vintr(svm);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
        clr_intercept(svm, INTERCEPT_VINTR);

        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        mark_dirty(svm->vmcb, VMCB_INTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

        /*
         * AMD CPUs circa 2014 track the G bit for all segments except CS.
         * However, the SVM spec states that the G bit is not observed by the
         * CPU, and some VMware virtual CPUs drop the G bit for all segments.
         * So let's synthesize a legal G bit for all segments, this helps
         * running KVM nested. It also helps cross-vendor migration, because
         * Intel's vmentry has a check on the 'G' bit.
         */
        var->g = s->limit > 0xfffff;

        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross-vendor migration purposes by marking the segment as
         * "not present".
         */
        var->unusable = !var->present;

        switch (seg) {
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
                 * isn't exposed
                 */
                var->type |= 0x2;
                break;
        case VCPU_SREG_DS:
        case VCPU_SREG_ES:
        case VCPU_SREG_FS:
        case VCPU_SREG_GS:
                /*
                 * The accessed bit must always be set in the segment
                 * descriptor cache, although it can be cleared in the
                 * descriptor, the cached bit always remains at 1. Since
                 * Intel has a check on this, set it here to support
                 * cross-vendor migration.
                 */
                if (!var->unusable)
                        var->type |= 0x1;
                break;
        case VCPU_SREG_SS:
                /*
                 * On AMD CPUs sometimes the DB bit in the segment
                 * descriptor is left as 1, although the whole segment has
                 * been made unusable. Clear it here to pass an Intel VMX
                 * entry check when cross vendor migrating.
                 */
                if (var->unusable)
                        var->db = 0;
                /* This is symmetric with svm_set_segment() */
                var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.idtr.limit;
        dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->size;
        svm->vmcb->save.idtr.base = dt->address;
        mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.gdtr.limit;
        dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->size;
        svm->vmcb->save.gdtr.base = dt->address;
        mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void update_cr0_intercept(struct vcpu_svm *svm)
{
        ulong gcr0 = svm->vcpu.arch.cr0;
        u64 *hcr0 = &svm->vmcb->save.cr0;

        *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
                | (gcr0 & SVM_CR0_SELECTIVE_MASK);

        mark_dirty(svm->vmcb, VMCB_CR);

        if (gcr0 == *hcr0) {
                clr_cr_intercept(svm, INTERCEPT_CR0_READ);
                clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
        } else {
                set_cr_intercept(svm, INTERCEPT_CR0_READ);
                set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
        }
}

void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
#endif
        vcpu->arch.cr0 = cr0;

        if (!npt_enabled)
                cr0 |= X86_CR0_PG | X86_CR0_WP;

        /*
         * Re-enable caching here because the QEMU BIOS
         * does not do it - this results in some delay at
         * reboot.
         */
        if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        mark_dirty(svm->vmcb, VMCB_CR);
        update_cr0_intercept(svm);
}

int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

        if (cr4 & X86_CR4_VMXE)
                return 1;

        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
                svm_flush_tlb(vcpu, true);

        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
        return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
        s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
        s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
        s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
        s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
        s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
        s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
        s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

        /*
         * This is always accurate, except if SYSRET returned to a segment
         * with SS.DPL != 3. Intel does not have this quirk, and always
         * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
         * would entail passing the CPL to userspace and back.
         */
        if (seg == VCPU_SREG_SS)
                /* This is symmetric with svm_get_segment() */
                svm->vmcb->save.cpl = (var->dpl & 3);

        mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_bp_intercept(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        clr_exception_intercept(svm, BP_VECTOR);

        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        set_exception_intercept(svm, BP_VECTOR);
        } else
                vcpu->guest_debug = 0;
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
        if (sd->next_asid > sd->max_asid) {
                ++sd->asid_generation;
                sd->next_asid = sd->min_asid;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->asid_generation = sd->asid_generation;
        svm->vmcb->control.asid = sd->next_asid++;

        mark_dirty(svm->vmcb, VMCB_ASID);
}
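
/*
 * Note on the generation scheme (added for clarity, not in the original
 * source): when the per-CPU ASID pool is exhausted, the generation counter
 * is bumped and allocation restarts at min_asid with a full TLB flush
 * (TLB_CONTROL_FLUSH_ALL_ASID), so stale translations tagged with recycled
 * ASIDs can never be reused. A vCPU whose cached asid_generation no longer
 * matches the per-CPU value is simply given a fresh ASID on its next run.
 */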
1674
73aaf249
JK
1675static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
1676{
1677 return to_svm(vcpu)->vmcb->save.dr6;
1678}
1679
1680static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1681{
1682 struct vcpu_svm *svm = to_svm(vcpu);
1683
1684 svm->vmcb->save.dr6 = value;
1685 mark_dirty(svm->vmcb, VMCB_DR);
1686}
1687
facb0139
PB
1688static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1689{
1690 struct vcpu_svm *svm = to_svm(vcpu);
1691
1692 get_debugreg(vcpu->arch.db[0], 0);
1693 get_debugreg(vcpu->arch.db[1], 1);
1694 get_debugreg(vcpu->arch.db[2], 2);
1695 get_debugreg(vcpu->arch.db[3], 3);
1696 vcpu->arch.dr6 = svm_get_dr6(vcpu);
1697 vcpu->arch.dr7 = svm->vmcb->save.dr7;
1698
1699 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1700 set_dr_intercepts(svm);
1701}
1702
020df079 1703static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
6aa8b732 1704{
42dbaa5a 1705 struct vcpu_svm *svm = to_svm(vcpu);
42dbaa5a 1706
020df079 1707 svm->vmcb->save.dr7 = value;
72214b96 1708 mark_dirty(svm->vmcb, VMCB_DR);
6aa8b732
AK
1709}
1710
851ba692 1711static int pf_interception(struct vcpu_svm *svm)
6aa8b732 1712{
0ede79e1 1713 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1261bfa3 1714 u64 error_code = svm->vmcb->control.exit_info_1;
6aa8b732 1715
1261bfa3 1716 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
00b10fe1
BS
1717 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1718 svm->vmcb->control.insn_bytes : NULL,
d0006530
PB
1719 svm->vmcb->control.insn_len);
1720}
1721
1722static int npf_interception(struct vcpu_svm *svm)
1723{
0ede79e1 1724 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
d0006530
PB
1725 u64 error_code = svm->vmcb->control.exit_info_1;
1726
1727 trace_kvm_page_fault(fault_address, error_code);
1728 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
00b10fe1
BS
1729 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1730 svm->vmcb->control.insn_bytes : NULL,
d0006530 1731 svm->vmcb->control.insn_len);
6aa8b732
AK
1732}
1733
851ba692 1734static int db_interception(struct vcpu_svm *svm)
d0bfb940 1735{
851ba692 1736 struct kvm_run *kvm_run = svm->vcpu.run;
99c22179 1737 struct kvm_vcpu *vcpu = &svm->vcpu;
851ba692 1738
d0bfb940 1739 if (!(svm->vcpu.guest_debug &
44c11430 1740 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
6be7d306 1741 !svm->nmi_singlestep) {
d0bfb940
JK
1742 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1743 return 1;
1744 }
44c11430 1745
6be7d306 1746 if (svm->nmi_singlestep) {
4aebd0e9 1747 disable_nmi_singlestep(svm);
99c22179
VK
1748 /* Make sure we check for pending NMIs upon entry */
1749 kvm_make_request(KVM_REQ_EVENT, vcpu);
44c11430
GN
1750 }
1751
1752 if (svm->vcpu.guest_debug &
e0231715 1753 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
44c11430
GN
1754 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1755 kvm_run->debug.arch.pc =
1756 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1757 kvm_run->debug.arch.exception = DB_VECTOR;
1758 return 0;
1759 }
1760
1761 return 1;
d0bfb940
JK
1762}
1763
851ba692 1764static int bp_interception(struct vcpu_svm *svm)
d0bfb940 1765{
851ba692
AK
1766 struct kvm_run *kvm_run = svm->vcpu.run;
1767
d0bfb940
JK
1768 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1769 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1770 kvm_run->debug.arch.exception = BP_VECTOR;
1771 return 0;
1772}
1773
851ba692 1774static int ud_interception(struct vcpu_svm *svm)
7aa81cc0 1775{
082d06ed 1776 return handle_ud(&svm->vcpu);
7aa81cc0
AL
1777}
1778
54a20552
EN
1779static int ac_interception(struct vcpu_svm *svm)
1780{
1781 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
1782 return 1;
1783}
1784
9718420e
LA
1785static int gp_interception(struct vcpu_svm *svm)
1786{
1787 struct kvm_vcpu *vcpu = &svm->vcpu;
1788 u32 error_code = svm->vmcb->control.exit_info_1;
9718420e
LA
1789
1790 WARN_ON_ONCE(!enable_vmware_backdoor);
1791
a6c6ed1e
SC
1792 /*
1793 * VMware backdoor emulation on #GP interception only handles IN{S},
1794 * OUT{S}, and RDPMC, none of which generate a non-zero error code.
1795 */
1796 if (error_code) {
1797 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1798 return 1;
1799 }
60fc3d02 1800 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
9718420e
LA
1801}
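/*
 * Editor's note, not part of the original file: with the VMware
 * backdoor enabled, a guest typically lands here via a CPL3 I/O probe
 * that faults with #GP because IOPL/TSS permissions deny the access.
 * The conventional probe (port and magic are VMware's published
 * values, shown only for illustration) looks like:
 *
 *	mov $0x564d5868, %eax   // "VMXh" magic
 *	mov $0x0a, %ecx         // command 10: get version
 *	mov $0x5658, %edx       // "VX" backdoor I/O port
 *	in  (%dx), %eax         // #GP -> emulated above
 */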
1802
67ec6607
JR
1803static bool is_erratum_383(void)
1804{
1805 int err, i;
1806 u64 value;
1807
1808 if (!erratum_383_found)
1809 return false;
1810
1811 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1812 if (err)
1813 return false;
1814
1815 /* Bit 62 may or may not be set for this mce */
1816 value &= ~(1ULL << 62);
1817
1818 if (value != 0xb600000000010015ULL)
1819 return false;
1820
1821 /* Clear MCi_STATUS registers */
1822 for (i = 0; i < 6; ++i)
1823 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1824
1825 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1826 if (!err) {
1827 u32 low, high;
1828
1829 value &= ~(1ULL << 2);
1830 low = lower_32_bits(value);
1831 high = upper_32_bits(value);
1832
1833 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1834 }
1835
1836 /* Flush tlb to evict multi-match entries */
1837 __flush_tlb_all();
1838
1839 return true;
1840}
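/*
 * Editor's note, not part of the original file: 0xb600000000010015 is
 * the MC0_STATUS signature AMD documents for erratum 383; bit 62
 * (OVER) is masked out first because it may or may not be set. After
 * matching, the MCi_STATUS banks are cleared and MCIP (bit 2 of
 * MCG_STATUS) is dropped so the spurious #MC is not reported again.
 */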
1841
fe5913e4 1842static void svm_handle_mce(struct vcpu_svm *svm)
53371b50 1843{
67ec6607
JR
1844 if (is_erratum_383()) {
1845 /*
1846 * Erratum 383 triggered. Guest state is corrupt so kill the
1847 * guest.
1848 */
1849 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1850
a8eeb04a 1851 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
67ec6607
JR
1852
1853 return;
1854 }
1855
53371b50
JR
1856 /*
1857 * On an #MC intercept the MCE handler is not called automatically in
1858 * the host. So do it by hand here.
1859 */
1860 asm volatile (
1861 "int $0x12\n");
1862 /* not sure if we ever come back to this point */
1863
fe5913e4
JR
1864 return;
1865}
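/*
 * Editor's note, not part of the original file: "int $0x12" raises
 * vector 18, i.e. #MC, through the host IDT. This is needed because
 * an intercepted #MC is consumed by the vmexit instead of being
 * delivered to the host's machine-check handler automatically.
 */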
1866
1867static int mc_interception(struct vcpu_svm *svm)
1868{
53371b50
JR
1869 return 1;
1870}
1871
851ba692 1872static int shutdown_interception(struct vcpu_svm *svm)
46fe4ddd 1873{
851ba692
AK
1874 struct kvm_run *kvm_run = svm->vcpu.run;
1875
46fe4ddd
JR
1876 /*
1877 * VMCB is undefined after a SHUTDOWN intercept
1878 * so reinitialize it.
1879 */
a2fa3e9f 1880 clear_page(svm->vmcb);
5690891b 1881 init_vmcb(svm);
46fe4ddd
JR
1882
1883 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1884 return 0;
1885}
1886
851ba692 1887static int io_interception(struct vcpu_svm *svm)
6aa8b732 1888{
cf8f70bf 1889 struct kvm_vcpu *vcpu = &svm->vcpu;
d77c26fc 1890 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
dca7f128 1891 int size, in, string;
039576c0 1892 unsigned port;
6aa8b732 1893
e756fc62 1894 ++svm->vcpu.stat.io_exits;
e70669ab 1895 string = (io_info & SVM_IOIO_STR_MASK) != 0;
039576c0 1896 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
8370c3d0 1897 if (string)
60fc3d02 1898 return kvm_emulate_instruction(vcpu, 0);
cf8f70bf 1899
039576c0
AK
1900 port = io_info >> 16;
1901 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
cf8f70bf 1902 svm->next_rip = svm->vmcb->control.exit_info_2;
cf8f70bf 1903
dca7f128 1904 return kvm_fast_pio(&svm->vcpu, size, port, in);
6aa8b732
AK
1905}
1906
851ba692 1907static int nmi_interception(struct vcpu_svm *svm)
c47f098d
JR
1908{
1909 return 1;
1910}
1911
851ba692 1912static int intr_interception(struct vcpu_svm *svm)
a0698055
JR
1913{
1914 ++svm->vcpu.stat.irq_exits;
1915 return 1;
1916}
1917
851ba692 1918static int nop_on_interception(struct vcpu_svm *svm)
6aa8b732
AK
1919{
1920 return 1;
1921}
1922
851ba692 1923static int halt_interception(struct vcpu_svm *svm)
6aa8b732 1924{
e756fc62 1925 return kvm_emulate_halt(&svm->vcpu);
6aa8b732
AK
1926}
1927
851ba692 1928static int vmmcall_interception(struct vcpu_svm *svm)
02e235bc 1929{
0d9c055e 1930 return kvm_emulate_hypercall(&svm->vcpu);
02e235bc
AK
1931}
1932
851ba692 1933static int vmload_interception(struct vcpu_svm *svm)
5542675b 1934{
9966bf68 1935 struct vmcb *nested_vmcb;
8c5fbf1a 1936 struct kvm_host_map map;
b742c1e6 1937 int ret;
9966bf68 1938
5542675b
AG
1939 if (nested_svm_check_permissions(svm))
1940 return 1;
1941
8c5fbf1a
KA
1942 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
1943 if (ret) {
1944 if (ret == -EINVAL)
1945 kvm_inject_gp(&svm->vcpu, 0);
9966bf68 1946 return 1;
8c5fbf1a
KA
1947 }
1948
1949 nested_vmcb = map.hva;
9966bf68 1950
b742c1e6 1951 ret = kvm_skip_emulated_instruction(&svm->vcpu);
e3e9ed3d 1952
9966bf68 1953 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
8c5fbf1a 1954 kvm_vcpu_unmap(&svm->vcpu, &map, true);
5542675b 1955
b742c1e6 1956 return ret;
5542675b
AG
1957}
1958
851ba692 1959static int vmsave_interception(struct vcpu_svm *svm)
5542675b 1960{
9966bf68 1961 struct vmcb *nested_vmcb;
8c5fbf1a 1962 struct kvm_host_map map;
b742c1e6 1963 int ret;
9966bf68 1964
5542675b
AG
1965 if (nested_svm_check_permissions(svm))
1966 return 1;
1967
8c5fbf1a
KA
1968 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
1969 if (ret) {
1970 if (ret == -EINVAL)
1971 kvm_inject_gp(&svm->vcpu, 0);
9966bf68 1972 return 1;
8c5fbf1a
KA
1973 }
1974
1975 nested_vmcb = map.hva;
9966bf68 1976
b742c1e6 1977 ret = kvm_skip_emulated_instruction(&svm->vcpu);
e3e9ed3d 1978
9966bf68 1979 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
8c5fbf1a 1980 kvm_vcpu_unmap(&svm->vcpu, &map, true);
5542675b 1981
b742c1e6 1982 return ret;
5542675b
AG
1983}
1984
851ba692 1985static int vmrun_interception(struct vcpu_svm *svm)
3d6368ef 1986{
3d6368ef
AG
1987 if (nested_svm_check_permissions(svm))
1988 return 1;
1989
e7134c1b 1990 return nested_svm_vmrun(svm);
3d6368ef
AG
1991}
1992
851ba692 1993static int stgi_interception(struct vcpu_svm *svm)
1371d904 1994{
b742c1e6
LP
1995 int ret;
1996
1371d904
AG
1997 if (nested_svm_check_permissions(svm))
1998 return 1;
1999
640bd6e5
JN
2000 /*
2001 * If VGIF is enabled, the STGI intercept is only added to
cc3d967f 2002 * detect the opening of the SMI/NMI window; remove it now.
640bd6e5
JN
2003 */
2004 if (vgif_enabled(svm))
2005 clr_intercept(svm, INTERCEPT_STGI);
2006
b742c1e6 2007 ret = kvm_skip_emulated_instruction(&svm->vcpu);
3842d135 2008 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1371d904 2009
2af9194d 2010 enable_gif(svm);
1371d904 2011
b742c1e6 2012 return ret;
1371d904
AG
2013}
2014
851ba692 2015static int clgi_interception(struct vcpu_svm *svm)
1371d904 2016{
b742c1e6
LP
2017 int ret;
2018
1371d904
AG
2019 if (nested_svm_check_permissions(svm))
2020 return 1;
2021
b742c1e6 2022 ret = kvm_skip_emulated_instruction(&svm->vcpu);
1371d904 2023
2af9194d 2024 disable_gif(svm);
1371d904
AG
2025
2026 /* After a CLGI no interrupts should come */
64b5bd27 2027 if (!kvm_vcpu_apicv_active(&svm->vcpu))
340d3bc3 2028 svm_clear_vintr(svm);
decdbf6a 2029
b742c1e6 2030 return ret;
1371d904
AG
2031}
2032
851ba692 2033static int invlpga_interception(struct vcpu_svm *svm)
ff092385
AG
2034{
2035 struct kvm_vcpu *vcpu = &svm->vcpu;
ff092385 2036
de3cd117
SC
2037 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
2038 kvm_rax_read(&svm->vcpu));
ec1ff790 2039
ff092385 2040 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
de3cd117 2041 kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
ff092385 2042
b742c1e6 2043 return kvm_skip_emulated_instruction(&svm->vcpu);
ff092385
AG
2044}
2045
532a46b9
JR
2046static int skinit_interception(struct vcpu_svm *svm)
2047{
de3cd117 2048 trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
532a46b9
JR
2049
2050 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2051 return 1;
2052}
2053
dab429a7
DK
2054static int wbinvd_interception(struct vcpu_svm *svm)
2055{
6affcbed 2056 return kvm_emulate_wbinvd(&svm->vcpu);
dab429a7
DK
2057}
2058
81dd35d4
JR
2059static int xsetbv_interception(struct vcpu_svm *svm)
2060{
2061 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
de3cd117 2062 u32 index = kvm_rcx_read(&svm->vcpu);
81dd35d4
JR
2063
2064 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
b742c1e6 2065 return kvm_skip_emulated_instruction(&svm->vcpu);
81dd35d4
JR
2066 }
2067
2068 return 1;
2069}
2070
0cb8410b
JM
2071static int rdpru_interception(struct vcpu_svm *svm)
2072{
2073 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2074 return 1;
2075}
2076
851ba692 2077static int task_switch_interception(struct vcpu_svm *svm)
6aa8b732 2078{
37817f29 2079 u16 tss_selector;
64a7ec06
GN
2080 int reason;
2081 int int_type = svm->vmcb->control.exit_int_info &
2082 SVM_EXITINTINFO_TYPE_MASK;
8317c298 2083 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
fe8e7f83
GN
2084 uint32_t type =
2085 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2086 uint32_t idt_v =
2087 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
e269fb21
JK
2088 bool has_error_code = false;
2089 u32 error_code = 0;
37817f29
IE
2090
2091 tss_selector = (u16)svm->vmcb->control.exit_info_1;
64a7ec06 2092
37817f29
IE
2093 if (svm->vmcb->control.exit_info_2 &
2094 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
64a7ec06
GN
2095 reason = TASK_SWITCH_IRET;
2096 else if (svm->vmcb->control.exit_info_2 &
2097 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2098 reason = TASK_SWITCH_JMP;
fe8e7f83 2099 else if (idt_v)
64a7ec06
GN
2100 reason = TASK_SWITCH_GATE;
2101 else
2102 reason = TASK_SWITCH_CALL;
2103
fe8e7f83
GN
2104 if (reason == TASK_SWITCH_GATE) {
2105 switch (type) {
2106 case SVM_EXITINTINFO_TYPE_NMI:
2107 svm->vcpu.arch.nmi_injected = false;
2108 break;
2109 case SVM_EXITINTINFO_TYPE_EXEPT:
e269fb21
JK
2110 if (svm->vmcb->control.exit_info_2 &
2111 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2112 has_error_code = true;
2113 error_code =
2114 (u32)svm->vmcb->control.exit_info_2;
2115 }
fe8e7f83
GN
2116 kvm_clear_exception_queue(&svm->vcpu);
2117 break;
2118 case SVM_EXITINTINFO_TYPE_INTR:
2119 kvm_clear_interrupt_queue(&svm->vcpu);
2120 break;
2121 default:
2122 break;
2123 }
2124 }
64a7ec06 2125
8317c298
GN
2126 if (reason != TASK_SWITCH_GATE ||
2127 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2128 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
f8ea7c60 2129 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
60fc3d02 2130 if (!skip_emulated_instruction(&svm->vcpu))
738fece4 2131 return 0;
f8ea7c60 2132 }
64a7ec06 2133
7f3d35fd
KW
2134 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2135 int_vec = -1;
2136
1051778f 2137 return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
60fc3d02 2138 has_error_code, error_code);
6aa8b732
AK
2139}
2140
851ba692 2141static int cpuid_interception(struct vcpu_svm *svm)
6aa8b732 2142{
6a908b62 2143 return kvm_emulate_cpuid(&svm->vcpu);
6aa8b732
AK
2144}
2145
851ba692 2146static int iret_interception(struct vcpu_svm *svm)
95ba8273
GN
2147{
2148 ++svm->vcpu.stat.nmi_window_exits;
8a05a1b8 2149 clr_intercept(svm, INTERCEPT_IRET);
44c11430 2150 svm->vcpu.arch.hflags |= HF_IRET_MASK;
bd3d1ec3 2151 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
f303b4ce 2152 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
95ba8273
GN
2153 return 1;
2154}
2155
851ba692 2156static int invlpg_interception(struct vcpu_svm *svm)
a7052897 2157{
df4f3108 2158 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
60fc3d02 2159 return kvm_emulate_instruction(&svm->vcpu, 0);
df4f3108
AP
2160
2161 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
b742c1e6 2162 return kvm_skip_emulated_instruction(&svm->vcpu);
a7052897
MT
2163}
2164
851ba692 2165static int emulate_on_interception(struct vcpu_svm *svm)
6aa8b732 2166{
60fc3d02 2167 return kvm_emulate_instruction(&svm->vcpu, 0);
6aa8b732
AK
2168}
2169
7607b717
BS
2170static int rsm_interception(struct vcpu_svm *svm)
2171{
60fc3d02 2172 return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
7607b717
BS
2173}
2174
332b56e4
AK
2175static int rdpmc_interception(struct vcpu_svm *svm)
2176{
2177 int err;
2178
d647eb63 2179 if (!nrips)
332b56e4
AK
2180 return emulate_on_interception(svm);
2181
2182 err = kvm_rdpmc(&svm->vcpu);
6affcbed 2183 return kvm_complete_insn_gp(&svm->vcpu, err);
332b56e4
AK
2184}
2185
52eb5a6d
XL
2186static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
2187 unsigned long val)
628afd2a
JR
2188{
2189 unsigned long cr0 = svm->vcpu.arch.cr0;
2190 bool ret = false;
2191 u64 intercept;
2192
2193 intercept = svm->nested.intercept;
2194
2195 if (!is_guest_mode(&svm->vcpu) ||
2196 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2197 return false;
2198
2199 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2200 val &= ~SVM_CR0_SELECTIVE_MASK;
2201
2202 if (cr0 ^ val) {
2203 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2204 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2205 }
2206
2207 return ret;
2208}
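/*
 * Editor's note, not part of the original file: per the APM the
 * CR0_SEL_WRITE intercept fires only when a write changes a CR0 bit
 * other than TS or MP, and SVM_CR0_SELECTIVE_MASK (TS | MP in current
 * headers) encodes exactly those ignored bits. Masking them out of
 * both values before the XOR above therefore reproduces the hardware
 * rule when deciding whether to forward the exit to L1.
 */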
2209
7ff76d58
AP
2210#define CR_VALID (1ULL << 63)
2211
2212static int cr_interception(struct vcpu_svm *svm)
2213{
2214 int reg, cr;
2215 unsigned long val;
2216 int err;
2217
2218 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2219 return emulate_on_interception(svm);
2220
2221 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2222 return emulate_on_interception(svm);
2223
2224 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
5e57518d
DK
2225 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2226 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2227 else
2228 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
7ff76d58
AP
2229
2230 err = 0;
2231 if (cr >= 16) { /* mov to cr */
2232 cr -= 16;
2233 val = kvm_register_read(&svm->vcpu, reg);
2234 switch (cr) {
2235 case 0:
628afd2a
JR
2236 if (!check_selective_cr0_intercepted(svm, val))
2237 err = kvm_set_cr0(&svm->vcpu, val);
977b2d03
JR
2238 else
2239 return 1;
2240
7ff76d58
AP
2241 break;
2242 case 3:
2243 err = kvm_set_cr3(&svm->vcpu, val);
2244 break;
2245 case 4:
2246 err = kvm_set_cr4(&svm->vcpu, val);
2247 break;
2248 case 8:
2249 err = kvm_set_cr8(&svm->vcpu, val);
2250 break;
2251 default:
2252 WARN(1, "unhandled write to CR%d", cr);
2253 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2254 return 1;
2255 }
2256 } else { /* mov from cr */
2257 switch (cr) {
2258 case 0:
2259 val = kvm_read_cr0(&svm->vcpu);
2260 break;
2261 case 2:
2262 val = svm->vcpu.arch.cr2;
2263 break;
2264 case 3:
9f8fe504 2265 val = kvm_read_cr3(&svm->vcpu);
7ff76d58
AP
2266 break;
2267 case 4:
2268 val = kvm_read_cr4(&svm->vcpu);
2269 break;
2270 case 8:
2271 val = kvm_get_cr8(&svm->vcpu);
2272 break;
2273 default:
2274 WARN(1, "unhandled read from CR%d", cr);
2275 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2276 return 1;
2277 }
2278 kvm_register_write(&svm->vcpu, reg, val);
2279 }
6affcbed 2280 return kvm_complete_insn_gp(&svm->vcpu, err);
7ff76d58
AP
2281}
2282
cae3797a
AP
2283static int dr_interception(struct vcpu_svm *svm)
2284{
2285 int reg, dr;
2286 unsigned long val;
cae3797a 2287
facb0139
PB
2288 if (svm->vcpu.guest_debug == 0) {
2289 /*
2290 * No more DR vmexits; force a reload of the debug registers
2291 * and reenter on this instruction. The next vmexit will
2292 * retrieve the full state of the debug registers.
2293 */
2294 clr_dr_intercepts(svm);
2295 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2296 return 1;
2297 }
2298
cae3797a
AP
2299 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2300 return emulate_on_interception(svm);
2301
2302 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2303 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2304
2305 if (dr >= 16) { /* mov to DRn */
16f8a6f9
NA
2306 if (!kvm_require_dr(&svm->vcpu, dr - 16))
2307 return 1;
cae3797a
AP
2308 val = kvm_register_read(&svm->vcpu, reg);
2309 kvm_set_dr(&svm->vcpu, dr - 16, val);
2310 } else {
16f8a6f9
NA
2311 if (!kvm_require_dr(&svm->vcpu, dr))
2312 return 1;
2313 kvm_get_dr(&svm->vcpu, dr, &val);
2314 kvm_register_write(&svm->vcpu, reg, val);
cae3797a
AP
2315 }
2316
b742c1e6 2317 return kvm_skip_emulated_instruction(&svm->vcpu);
cae3797a
AP
2318}
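/*
 * Editor's sketch, not part of the original file: together with
 * svm_sync_dirty_debug_regs() this implements lazy debug-register
 * switching:
 *
 *	guest touches a DR -> dr_interception():
 *		clr_dr_intercepts();            // stop exiting on DRs
 *		switch_db_regs |= WONT_EXIT;    // DRs live in hardware now
 *	next vmexit        -> svm_sync_dirty_debug_regs():
 *		get_debugreg(DR0..DR3/DR6/DR7); // recover guest values
 *		set_dr_intercepts();            // re-arm the intercepts
 *
 * so a guest using debug registers heavily pays one vmexit, not one
 * per access.
 */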
2319
851ba692 2320static int cr8_write_interception(struct vcpu_svm *svm)
1d075434 2321{
851ba692 2322 struct kvm_run *kvm_run = svm->vcpu.run;
eea1cff9 2323 int r;
851ba692 2324
0a5fff19
GN
2325 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2326 /* instruction emulation calls kvm_set_cr8() */
7ff76d58 2327 r = cr_interception(svm);
35754c98 2328 if (lapic_in_kernel(&svm->vcpu))
7ff76d58 2329 return r;
0a5fff19 2330 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
7ff76d58 2331 return r;
1d075434
JR
2332 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2333 return 0;
2334}
2335
801e459a
TL
2336static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2337{
d1d93fa9
TL
2338 msr->data = 0;
2339
2340 switch (msr->index) {
2341 case MSR_F10H_DECFG:
2342 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
2343 msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
2344 break;
2345 default:
2346 return 1;
2347 }
2348
2349 return 0;
801e459a
TL
2350}
2351
609e36d3 2352static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
6aa8b732 2353{
a2fa3e9f
GH
2354 struct vcpu_svm *svm = to_svm(vcpu);
2355
609e36d3 2356 switch (msr_info->index) {
8c06585d 2357 case MSR_STAR:
609e36d3 2358 msr_info->data = svm->vmcb->save.star;
6aa8b732 2359 break;
0e859cac 2360#ifdef CONFIG_X86_64
6aa8b732 2361 case MSR_LSTAR:
609e36d3 2362 msr_info->data = svm->vmcb->save.lstar;
6aa8b732
AK
2363 break;
2364 case MSR_CSTAR:
609e36d3 2365 msr_info->data = svm->vmcb->save.cstar;
6aa8b732
AK
2366 break;
2367 case MSR_KERNEL_GS_BASE:
609e36d3 2368 msr_info->data = svm->vmcb->save.kernel_gs_base;
6aa8b732
AK
2369 break;
2370 case MSR_SYSCALL_MASK:
609e36d3 2371 msr_info->data = svm->vmcb->save.sfmask;
6aa8b732
AK
2372 break;
2373#endif
2374 case MSR_IA32_SYSENTER_CS:
609e36d3 2375 msr_info->data = svm->vmcb->save.sysenter_cs;
6aa8b732
AK
2376 break;
2377 case MSR_IA32_SYSENTER_EIP:
609e36d3 2378 msr_info->data = svm->sysenter_eip;
6aa8b732
AK
2379 break;
2380 case MSR_IA32_SYSENTER_ESP:
609e36d3 2381 msr_info->data = svm->sysenter_esp;
6aa8b732 2382 break;
46896c73
PB
2383 case MSR_TSC_AUX:
2384 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2385 return 1;
2386 msr_info->data = svm->tsc_aux;
2387 break;
e0231715
JR
2388 /*
2389 * Nobody will change the following 5 values in the VMCB so we can
2390 * safely return them on rdmsr. They will always be 0 until LBRV is
2391 * implemented.
2392 */
a2938c80 2393 case MSR_IA32_DEBUGCTLMSR:
609e36d3 2394 msr_info->data = svm->vmcb->save.dbgctl;
a2938c80
JR
2395 break;
2396 case MSR_IA32_LASTBRANCHFROMIP:
609e36d3 2397 msr_info->data = svm->vmcb->save.br_from;
a2938c80
JR
2398 break;
2399 case MSR_IA32_LASTBRANCHTOIP:
609e36d3 2400 msr_info->data = svm->vmcb->save.br_to;
a2938c80
JR
2401 break;
2402 case MSR_IA32_LASTINTFROMIP:
609e36d3 2403 msr_info->data = svm->vmcb->save.last_excp_from;
a2938c80
JR
2404 break;
2405 case MSR_IA32_LASTINTTOIP:
609e36d3 2406 msr_info->data = svm->vmcb->save.last_excp_to;
a2938c80 2407 break;
b286d5d8 2408 case MSR_VM_HSAVE_PA:
609e36d3 2409 msr_info->data = svm->nested.hsave_msr;
b286d5d8 2410 break;
eb6f302e 2411 case MSR_VM_CR:
609e36d3 2412 msr_info->data = svm->nested.vm_cr_msr;
eb6f302e 2413 break;
b2ac58f9
KA
2414 case MSR_IA32_SPEC_CTRL:
2415 if (!msr_info->host_initiated &&
df7e8818
PB
2416 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
2417 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
6ac2f49e
KRW
2418 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
2419 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
b2ac58f9
KA
2420 return 1;
2421
2422 msr_info->data = svm->spec_ctrl;
2423 break;
bc226f07
TL
2424 case MSR_AMD64_VIRT_SPEC_CTRL:
2425 if (!msr_info->host_initiated &&
2426 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2427 return 1;
2428
2429 msr_info->data = svm->virt_spec_ctrl;
2430 break;
ae8b7875
BP
2431 case MSR_F15H_IC_CFG: {
2432
2433 int family, model;
2434
2435 family = guest_cpuid_family(vcpu);
2436 model = guest_cpuid_model(vcpu);
2437
2438 if (family < 0 || model < 0)
2439 return kvm_get_msr_common(vcpu, msr_info);
2440
2441 msr_info->data = 0;
2442
2443 if (family == 0x15 &&
2444 (model >= 0x2 && model < 0x20))
2445 msr_info->data = 0x1E;
2446 }
2447 break;
d1d93fa9
TL
2448 case MSR_F10H_DECFG:
2449 msr_info->data = svm->msr_decfg;
2450 break;
6aa8b732 2451 default:
609e36d3 2452 return kvm_get_msr_common(vcpu, msr_info);
6aa8b732
AK
2453 }
2454 return 0;
2455}
2456
851ba692 2457static int rdmsr_interception(struct vcpu_svm *svm)
6aa8b732 2458{
1edce0a9 2459 return kvm_emulate_rdmsr(&svm->vcpu);
6aa8b732
AK
2460}
2461
4a810181
JR
2462static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2463{
2464 struct vcpu_svm *svm = to_svm(vcpu);
2465 int svm_dis, chg_mask;
2466
2467 if (data & ~SVM_VM_CR_VALID_MASK)
2468 return 1;
2469
2470 chg_mask = SVM_VM_CR_VALID_MASK;
2471
2472 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2473 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2474
2475 svm->nested.vm_cr_msr &= ~chg_mask;
2476 svm->nested.vm_cr_msr |= (data & chg_mask);
2477
2478 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2479
2480 /* check for svm_disable while efer.svme is set */
2481 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2482 return 1;
2483
2484 return 0;
2485}
2486
8fe8ab46 2487static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
6aa8b732 2488{
a2fa3e9f
GH
2489 struct vcpu_svm *svm = to_svm(vcpu);
2490
8fe8ab46
WA
2491 u32 ecx = msr->index;
2492 u64 data = msr->data;
6aa8b732 2493 switch (ecx) {
15038e14
PB
2494 case MSR_IA32_CR_PAT:
2495 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
2496 return 1;
2497 vcpu->arch.pat = data;
2498 svm->vmcb->save.g_pat = data;
2499 mark_dirty(svm->vmcb, VMCB_NPT);
2500 break;
b2ac58f9
KA
2501 case MSR_IA32_SPEC_CTRL:
2502 if (!msr->host_initiated &&
df7e8818
PB
2503 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
2504 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
6ac2f49e
KRW
2505 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
2506 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
b2ac58f9
KA
2507 return 1;
2508
6441fa61 2509 if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
b2ac58f9
KA
2510 return 1;
2511
2512 svm->spec_ctrl = data;
b2ac58f9
KA
2513 if (!data)
2514 break;
2515
2516 /*
2517 * For non-nested:
2518 * When it's written (to non-zero) for the first time, pass
2519 * it through.
2520 *
2521 * For nested:
2522 * The handling of the MSR bitmap for L2 guests is done in
2523 * nested_svm_vmrun_msrpm.
2524 * We update the L1 MSR bit as well since it will end up
2525 * touching the MSR anyway now.
2526 */
2527 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2528 break;
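	/*
	 * Editor's note, not part of the original file: after the first
	 * non-zero write, set_msr_interception(..., 1, 1) above clears
	 * both the read and write intercepts for SPEC_CTRL, so later
	 * guest accesses hit the hardware MSR directly and vcpu_run()
	 * has to save/restore it around VMRUN instead.
	 */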
15d45071
AR
2529 case MSR_IA32_PRED_CMD:
2530 if (!msr->host_initiated &&
e7c587da 2531 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
15d45071
AR
2532 return 1;
2533
2534 if (data & ~PRED_CMD_IBPB)
2535 return 1;
6441fa61
PB
2536 if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
2537 return 1;
15d45071
AR
2538 if (!data)
2539 break;
2540
2541 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
15d45071
AR
2542 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
2543 break;
bc226f07
TL
2544 case MSR_AMD64_VIRT_SPEC_CTRL:
2545 if (!msr->host_initiated &&
2546 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2547 return 1;
2548
2549 if (data & ~SPEC_CTRL_SSBD)
2550 return 1;
2551
2552 svm->virt_spec_ctrl = data;
2553 break;
8c06585d 2554 case MSR_STAR:
a2fa3e9f 2555 svm->vmcb->save.star = data;
6aa8b732 2556 break;
49b14f24 2557#ifdef CONFIG_X86_64
6aa8b732 2558 case MSR_LSTAR:
a2fa3e9f 2559 svm->vmcb->save.lstar = data;
6aa8b732
AK
2560 break;
2561 case MSR_CSTAR:
a2fa3e9f 2562 svm->vmcb->save.cstar = data;
6aa8b732
AK
2563 break;
2564 case MSR_KERNEL_GS_BASE:
a2fa3e9f 2565 svm->vmcb->save.kernel_gs_base = data;
6aa8b732
AK
2566 break;
2567 case MSR_SYSCALL_MASK:
a2fa3e9f 2568 svm->vmcb->save.sfmask = data;
6aa8b732
AK
2569 break;
2570#endif
2571 case MSR_IA32_SYSENTER_CS:
a2fa3e9f 2572 svm->vmcb->save.sysenter_cs = data;
6aa8b732
AK
2573 break;
2574 case MSR_IA32_SYSENTER_EIP:
017cb99e 2575 svm->sysenter_eip = data;
a2fa3e9f 2576 svm->vmcb->save.sysenter_eip = data;
6aa8b732
AK
2577 break;
2578 case MSR_IA32_SYSENTER_ESP:
017cb99e 2579 svm->sysenter_esp = data;
a2fa3e9f 2580 svm->vmcb->save.sysenter_esp = data;
6aa8b732 2581 break;
46896c73
PB
2582 case MSR_TSC_AUX:
2583 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2584 return 1;
2585
2586 /*
2587 * This is rare, so we update the MSR here instead of using
2588 * direct_access_msrs. Doing that would require a rdmsr in
2589 * svm_vcpu_put.
2590 */
2591 svm->tsc_aux = data;
2592 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2593 break;
a2938c80 2594 case MSR_IA32_DEBUGCTLMSR:
2a6b20b8 2595 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
a737f256
CD
2596 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
2597 __func__, data);
24e09cbf
JR
2598 break;
2599 }
2600 if (data & DEBUGCTL_RESERVED_BITS)
2601 return 1;
2602
2603 svm->vmcb->save.dbgctl = data;
b53ba3f9 2604 mark_dirty(svm->vmcb, VMCB_LBR);
24e09cbf
JR
2605 if (data & (1ULL<<0))
2606 svm_enable_lbrv(svm);
2607 else
2608 svm_disable_lbrv(svm);
a2938c80 2609 break;
b286d5d8 2610 case MSR_VM_HSAVE_PA:
e6aa9abd 2611 svm->nested.hsave_msr = data;
62b9abaa 2612 break;
3c5d0a44 2613 case MSR_VM_CR:
4a810181 2614 return svm_set_vm_cr(vcpu, data);
3c5d0a44 2615 case MSR_VM_IGNNE:
a737f256 2616 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3c5d0a44 2617 break;
d1d93fa9
TL
2618 case MSR_F10H_DECFG: {
2619 struct kvm_msr_entry msr_entry;
2620
2621 msr_entry.index = msr->index;
2622 if (svm_get_msr_feature(&msr_entry))
2623 return 1;
2624
2625 /* Check the supported bits */
2626 if (data & ~msr_entry.data)
2627 return 1;
2628
2629 /* Don't allow the guest to change a bit, #GP */
2630 if (!msr->host_initiated && (data ^ msr_entry.data))
2631 return 1;
2632
2633 svm->msr_decfg = data;
2634 break;
2635 }
44a95dae
SS
2636 case MSR_IA32_APICBASE:
2637 if (kvm_vcpu_apicv_active(vcpu))
2638 avic_update_vapic_bar(to_svm(vcpu), data);
b2869f28 2639 /* Fall through */
6aa8b732 2640 default:
8fe8ab46 2641 return kvm_set_msr_common(vcpu, msr);
6aa8b732
AK
2642 }
2643 return 0;
2644}
2645
851ba692 2646static int wrmsr_interception(struct vcpu_svm *svm)
6aa8b732 2647{
1edce0a9 2648 return kvm_emulate_wrmsr(&svm->vcpu);
6aa8b732
AK
2649}
2650
851ba692 2651static int msr_interception(struct vcpu_svm *svm)
6aa8b732 2652{
e756fc62 2653 if (svm->vmcb->control.exit_info_1)
851ba692 2654 return wrmsr_interception(svm);
6aa8b732 2655 else
851ba692 2656 return rdmsr_interception(svm);
6aa8b732
AK
2657}
2658
851ba692 2659static int interrupt_window_interception(struct vcpu_svm *svm)
c1150d8c 2660{
3842d135 2661 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
f0b85051 2662 svm_clear_vintr(svm);
f3515dc3
SS
2663
2664 /*
2665 * For AVIC, the only reason to end up here is ExtINTs.
2666 * In this case AVIC was temporarily disabled for
2667 * requesting the IRQ window and we have to re-enable it.
2668 */
2669 svm_toggle_avic_for_irq_window(&svm->vcpu, true);
2670
85f455f7 2671 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
decdbf6a 2672 mark_dirty(svm->vmcb, VMCB_INTR);
675acb75 2673 ++svm->vcpu.stat.irq_window_exits;
c1150d8c
DL
2674 return 1;
2675}
2676
565d0998
ML
2677static int pause_interception(struct vcpu_svm *svm)
2678{
de63ad4c
LM
2679 struct kvm_vcpu *vcpu = &svm->vcpu;
2680 bool in_kernel = (svm_get_cpl(vcpu) == 0);
2681
8566ac8b
BM
2682 if (pause_filter_thresh)
2683 grow_ple_window(vcpu);
2684
de63ad4c 2685 kvm_vcpu_on_spin(vcpu, in_kernel);
565d0998
ML
2686 return 1;
2687}
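/*
 * Editor's note, not part of the original file: on a PAUSE-loop exit
 * the pause filter count is grown so that a vCPU spinning on a lock
 * exits progressively less often, and kvm_vcpu_on_spin() attempts a
 * directed yield to a runnable vCPU of the same VM that is likely to
 * hold the lock; in_kernel (CPL == 0 here) feeds its heuristic for
 * choosing a candidate.
 */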
2688
87c00572
GS
2689static int nop_interception(struct vcpu_svm *svm)
2690{
b742c1e6 2691 return kvm_skip_emulated_instruction(&(svm->vcpu));
87c00572
GS
2692}
2693
2694static int monitor_interception(struct vcpu_svm *svm)
2695{
2696 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
2697 return nop_interception(svm);
2698}
2699
2700static int mwait_interception(struct vcpu_svm *svm)
2701{
2702 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
2703 return nop_interception(svm);
2704}
2705
09941fbb 2706static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
7ff76d58
AP
2707 [SVM_EXIT_READ_CR0] = cr_interception,
2708 [SVM_EXIT_READ_CR3] = cr_interception,
2709 [SVM_EXIT_READ_CR4] = cr_interception,
2710 [SVM_EXIT_READ_CR8] = cr_interception,
5e57518d 2711 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
628afd2a 2712 [SVM_EXIT_WRITE_CR0] = cr_interception,
7ff76d58
AP
2713 [SVM_EXIT_WRITE_CR3] = cr_interception,
2714 [SVM_EXIT_WRITE_CR4] = cr_interception,
e0231715 2715 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
cae3797a
AP
2716 [SVM_EXIT_READ_DR0] = dr_interception,
2717 [SVM_EXIT_READ_DR1] = dr_interception,
2718 [SVM_EXIT_READ_DR2] = dr_interception,
2719 [SVM_EXIT_READ_DR3] = dr_interception,
2720 [SVM_EXIT_READ_DR4] = dr_interception,
2721 [SVM_EXIT_READ_DR5] = dr_interception,
2722 [SVM_EXIT_READ_DR6] = dr_interception,
2723 [SVM_EXIT_READ_DR7] = dr_interception,
2724 [SVM_EXIT_WRITE_DR0] = dr_interception,
2725 [SVM_EXIT_WRITE_DR1] = dr_interception,
2726 [SVM_EXIT_WRITE_DR2] = dr_interception,
2727 [SVM_EXIT_WRITE_DR3] = dr_interception,
2728 [SVM_EXIT_WRITE_DR4] = dr_interception,
2729 [SVM_EXIT_WRITE_DR5] = dr_interception,
2730 [SVM_EXIT_WRITE_DR6] = dr_interception,
2731 [SVM_EXIT_WRITE_DR7] = dr_interception,
d0bfb940
JK
2732 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
2733 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
7aa81cc0 2734 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
e0231715 2735 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
e0231715 2736 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
54a20552 2737 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
9718420e 2738 [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
e0231715 2739 [SVM_EXIT_INTR] = intr_interception,
c47f098d 2740 [SVM_EXIT_NMI] = nmi_interception,
6aa8b732
AK
2741 [SVM_EXIT_SMI] = nop_on_interception,
2742 [SVM_EXIT_INIT] = nop_on_interception,
c1150d8c 2743 [SVM_EXIT_VINTR] = interrupt_window_interception,
332b56e4 2744 [SVM_EXIT_RDPMC] = rdpmc_interception,
6aa8b732 2745 [SVM_EXIT_CPUID] = cpuid_interception,
95ba8273 2746 [SVM_EXIT_IRET] = iret_interception,
cf5a94d1 2747 [SVM_EXIT_INVD] = emulate_on_interception,
565d0998 2748 [SVM_EXIT_PAUSE] = pause_interception,
6aa8b732 2749 [SVM_EXIT_HLT] = halt_interception,
a7052897 2750 [SVM_EXIT_INVLPG] = invlpg_interception,
ff092385 2751 [SVM_EXIT_INVLPGA] = invlpga_interception,
e0231715 2752 [SVM_EXIT_IOIO] = io_interception,
6aa8b732
AK
2753 [SVM_EXIT_MSR] = msr_interception,
2754 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
46fe4ddd 2755 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
3d6368ef 2756 [SVM_EXIT_VMRUN] = vmrun_interception,
02e235bc 2757 [SVM_EXIT_VMMCALL] = vmmcall_interception,
5542675b
AG
2758 [SVM_EXIT_VMLOAD] = vmload_interception,
2759 [SVM_EXIT_VMSAVE] = vmsave_interception,
1371d904
AG
2760 [SVM_EXIT_STGI] = stgi_interception,
2761 [SVM_EXIT_CLGI] = clgi_interception,
532a46b9 2762 [SVM_EXIT_SKINIT] = skinit_interception,
dab429a7 2763 [SVM_EXIT_WBINVD] = wbinvd_interception,
87c00572
GS
2764 [SVM_EXIT_MONITOR] = monitor_interception,
2765 [SVM_EXIT_MWAIT] = mwait_interception,
81dd35d4 2766 [SVM_EXIT_XSETBV] = xsetbv_interception,
0cb8410b 2767 [SVM_EXIT_RDPRU] = rdpru_interception,
d0006530 2768 [SVM_EXIT_NPF] = npf_interception,
7607b717 2769 [SVM_EXIT_RSM] = rsm_interception,
18f40c53
SS
2770 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
2771 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
6aa8b732
AK
2772};
2773
ae8cc059 2774static void dump_vmcb(struct kvm_vcpu *vcpu)
3f10c846
JR
2775{
2776 struct vcpu_svm *svm = to_svm(vcpu);
2777 struct vmcb_control_area *control = &svm->vmcb->control;
2778 struct vmcb_save_area *save = &svm->vmcb->save;
2779
6f2f8453
PB
2780 if (!dump_invalid_vmcb) {
2781 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2782 return;
2783 }
2784
3f10c846 2785 pr_err("VMCB Control Area:\n");
ae8cc059
JP
2786 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
2787 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
2788 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
2789 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
2790 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
2791 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
2792 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
1d8fb44a
BM
2793 pr_err("%-20s%d\n", "pause filter threshold:",
2794 control->pause_filter_thresh);
ae8cc059
JP
2795 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
2796 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
2797 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
2798 pr_err("%-20s%d\n", "asid:", control->asid);
2799 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
2800 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
2801 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
2802 pr_err("%-20s%08x\n", "int_state:", control->int_state);
2803 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
2804 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
2805 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
2806 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
2807 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
2808 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
2809 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
44a95dae 2810 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
ae8cc059
JP
2811 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
2812 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
0dc92119 2813 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
ae8cc059 2814 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
44a95dae
SS
2815 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
2816 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
2817 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3f10c846 2818 pr_err("VMCB State Save Area:\n");
ae8cc059
JP
2819 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2820 "es:",
2821 save->es.selector, save->es.attrib,
2822 save->es.limit, save->es.base);
2823 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2824 "cs:",
2825 save->cs.selector, save->cs.attrib,
2826 save->cs.limit, save->cs.base);
2827 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2828 "ss:",
2829 save->ss.selector, save->ss.attrib,
2830 save->ss.limit, save->ss.base);
2831 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2832 "ds:",
2833 save->ds.selector, save->ds.attrib,
2834 save->ds.limit, save->ds.base);
2835 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2836 "fs:",
2837 save->fs.selector, save->fs.attrib,
2838 save->fs.limit, save->fs.base);
2839 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2840 "gs:",
2841 save->gs.selector, save->gs.attrib,
2842 save->gs.limit, save->gs.base);
2843 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2844 "gdtr:",
2845 save->gdtr.selector, save->gdtr.attrib,
2846 save->gdtr.limit, save->gdtr.base);
2847 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2848 "ldtr:",
2849 save->ldtr.selector, save->ldtr.attrib,
2850 save->ldtr.limit, save->ldtr.base);
2851 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2852 "idtr:",
2853 save->idtr.selector, save->idtr.attrib,
2854 save->idtr.limit, save->idtr.base);
2855 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2856 "tr:",
2857 save->tr.selector, save->tr.attrib,
2858 save->tr.limit, save->tr.base);
3f10c846
JR
2859 pr_err("cpl: %d efer: %016llx\n",
2860 save->cpl, save->efer);
ae8cc059
JP
2861 pr_err("%-15s %016llx %-13s %016llx\n",
2862 "cr0:", save->cr0, "cr2:", save->cr2);
2863 pr_err("%-15s %016llx %-13s %016llx\n",
2864 "cr3:", save->cr3, "cr4:", save->cr4);
2865 pr_err("%-15s %016llx %-13s %016llx\n",
2866 "dr6:", save->dr6, "dr7:", save->dr7);
2867 pr_err("%-15s %016llx %-13s %016llx\n",
2868 "rip:", save->rip, "rflags:", save->rflags);
2869 pr_err("%-15s %016llx %-13s %016llx\n",
2870 "rsp:", save->rsp, "rax:", save->rax);
2871 pr_err("%-15s %016llx %-13s %016llx\n",
2872 "star:", save->star, "lstar:", save->lstar);
2873 pr_err("%-15s %016llx %-13s %016llx\n",
2874 "cstar:", save->cstar, "sfmask:", save->sfmask);
2875 pr_err("%-15s %016llx %-13s %016llx\n",
2876 "kernel_gs_base:", save->kernel_gs_base,
2877 "sysenter_cs:", save->sysenter_cs);
2878 pr_err("%-15s %016llx %-13s %016llx\n",
2879 "sysenter_esp:", save->sysenter_esp,
2880 "sysenter_eip:", save->sysenter_eip);
2881 pr_err("%-15s %016llx %-13s %016llx\n",
2882 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
2883 pr_err("%-15s %016llx %-13s %016llx\n",
2884 "br_from:", save->br_from, "br_to:", save->br_to);
2885 pr_err("%-15s %016llx %-13s %016llx\n",
2886 "excp_from:", save->last_excp_from,
2887 "excp_to:", save->last_excp_to);
3f10c846
JR
2888}
2889
586f9607
AK
2890static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
2891{
2892 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
2893
2894 *info1 = control->exit_info_1;
2895 *info2 = control->exit_info_2;
2896}
2897
1e9e2622
WL
2898static int handle_exit(struct kvm_vcpu *vcpu,
2899 enum exit_fastpath_completion exit_fastpath)
6aa8b732 2900{
04d2cc77 2901 struct vcpu_svm *svm = to_svm(vcpu);
851ba692 2902 struct kvm_run *kvm_run = vcpu->run;
a2fa3e9f 2903 u32 exit_code = svm->vmcb->control.exit_code;
6aa8b732 2904
8b89fe1f
PB
2905 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
2906
4ee546b4 2907 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
2be4fc7a
JR
2908 vcpu->arch.cr0 = svm->vmcb->save.cr0;
2909 if (npt_enabled)
2910 vcpu->arch.cr3 = svm->vmcb->save.cr3;
af9ca2d7 2911
cd3ff653
JR
2912 if (unlikely(svm->nested.exit_required)) {
2913 nested_svm_vmexit(svm);
2914 svm->nested.exit_required = false;
2915
2916 return 1;
2917 }
2918
2030753d 2919 if (is_guest_mode(vcpu)) {
410e4d57
JR
2920 int vmexit;
2921
d8cabddf
JR
2922 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
2923 svm->vmcb->control.exit_info_1,
2924 svm->vmcb->control.exit_info_2,
2925 svm->vmcb->control.exit_int_info,
e097e5ff
SH
2926 svm->vmcb->control.exit_int_info_err,
2927 KVM_ISA_SVM);
d8cabddf 2928
410e4d57
JR
2929 vmexit = nested_svm_exit_special(svm);
2930
2931 if (vmexit == NESTED_EXIT_CONTINUE)
2932 vmexit = nested_svm_exit_handled(svm);
2933
2934 if (vmexit == NESTED_EXIT_DONE)
cf74a78b 2935 return 1;
cf74a78b
AG
2936 }
2937
a5c3832d
JR
2938 svm_complete_interrupts(svm);
2939
04d2cc77
AK
2940 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
2941 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2942 kvm_run->fail_entry.hardware_entry_failure_reason
2943 = svm->vmcb->control.exit_code;
3f10c846 2944 dump_vmcb(vcpu);
04d2cc77
AK
2945 return 0;
2946 }
2947
a2fa3e9f 2948 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
709ddebf 2949 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
55c5e464
JR
2950 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
2951 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
6614c7d0 2952 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
6aa8b732 2953 "exit_code 0x%x\n",
b8688d51 2954 __func__, svm->vmcb->control.exit_int_info,
6aa8b732
AK
2955 exit_code);
2956
1e9e2622
WL
2957 if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
2958 kvm_skip_emulated_instruction(vcpu);
2959 return 1;
2960 } else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
56919c5c 2961 || !svm_exit_handlers[exit_code]) {
7396d337
LA
2962 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
2963 dump_vmcb(vcpu);
2964 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2965 vcpu->run->internal.suberror =
2966 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2967 vcpu->run->internal.ndata = 1;
2968 vcpu->run->internal.data[0] = exit_code;
2969 return 0;
6aa8b732
AK
2970 }
2971
3dcb2a3f
AA
2972#ifdef CONFIG_RETPOLINE
2973 if (exit_code == SVM_EXIT_MSR)
2974 return msr_interception(svm);
2975 else if (exit_code == SVM_EXIT_VINTR)
2976 return interrupt_window_interception(svm);
2977 else if (exit_code == SVM_EXIT_INTR)
2978 return intr_interception(svm);
2979 else if (exit_code == SVM_EXIT_HLT)
2980 return halt_interception(svm);
2981 else if (exit_code == SVM_EXIT_NPF)
2982 return npf_interception(svm);
2983#endif
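	/*
	 * Editor's note, not part of the original file: the direct
	 * if/else dispatch above exists because, under CONFIG_RETPOLINE,
	 * the indirect call through svm_exit_handlers[] below goes via a
	 * retpoline thunk; the hottest exit reasons are therefore
	 * dispatched with cheap direct calls.
	 */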
851ba692 2984 return svm_exit_handlers[exit_code](svm);
6aa8b732
AK
2985}
2986
2987static void reload_tss(struct kvm_vcpu *vcpu)
2988{
2989 int cpu = raw_smp_processor_id();
2990
0fe1e009
TH
2991 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2992 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
6aa8b732
AK
2993 load_TR_desc();
2994}
2995
e756fc62 2996static void pre_svm_run(struct vcpu_svm *svm)
6aa8b732
AK
2997{
2998 int cpu = raw_smp_processor_id();
2999
0fe1e009 3000 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
6aa8b732 3001
70cd94e6
BS
3002 if (sev_guest(svm->vcpu.kvm))
3003 return pre_sev_run(svm, cpu);
3004
4b656b12 3005 /* FIXME: handle wraparound of asid_generation */
0fe1e009
TH
3006 if (svm->asid_generation != sd->asid_generation)
3007 new_asid(svm, sd);
6aa8b732
AK
3008}
3009
95ba8273
GN
3010static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3011{
3012 struct vcpu_svm *svm = to_svm(vcpu);
3013
3014 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3015 vcpu->arch.hflags |= HF_NMI_MASK;
8a05a1b8 3016 set_intercept(svm, INTERCEPT_IRET);
95ba8273
GN
3017 ++vcpu->stat.nmi_injections;
3018}
6aa8b732 3019
66fd3f7f 3020static void svm_set_irq(struct kvm_vcpu *vcpu)
2a8067f1
ED
3021{
3022 struct vcpu_svm *svm = to_svm(vcpu);
3023
2af9194d 3024 BUG_ON(!(gif_set(svm)));
cf74a78b 3025
9fb2d2b4
GN
3026 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3027 ++vcpu->stat.irq_injections;
3028
219b65dc
AG
3029 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3030 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
2a8067f1
ED
3031}
3032
95ba8273 3033static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
aaacfc9a
JR
3034{
3035 struct vcpu_svm *svm = to_svm(vcpu);
aaacfc9a 3036
49d654d8 3037 if (svm_nested_virtualize_tpr(vcpu))
88ab24ad
JR
3038 return;
3039
596f3142
RK
3040 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3041
95ba8273 3042 if (irr == -1)
aaacfc9a
JR
3043 return;
3044
95ba8273 3045 if (tpr >= irr)
4ee546b4 3046 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
95ba8273 3047}
aaacfc9a 3048
95ba8273
GN
3049static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
3050{
3051 struct vcpu_svm *svm = to_svm(vcpu);
3052 struct vmcb *vmcb = svm->vmcb;
924584cc
JR
3053 int ret;
3054 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3055 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3056 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3057
3058 return ret;
aaacfc9a
JR
3059}
3060
3cfc3092
JK
3061static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3062{
3063 struct vcpu_svm *svm = to_svm(vcpu);
3064
3065 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3066}
3067
3068static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3069{
3070 struct vcpu_svm *svm = to_svm(vcpu);
3071
3072 if (masked) {
3073 svm->vcpu.arch.hflags |= HF_NMI_MASK;
8a05a1b8 3074 set_intercept(svm, INTERCEPT_IRET);
3cfc3092
JK
3075 } else {
3076 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
8a05a1b8 3077 clr_intercept(svm, INTERCEPT_IRET);
3cfc3092
JK
3078 }
3079}
3080
78646121
GN
3081static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3082{
3083 struct vcpu_svm *svm = to_svm(vcpu);
3084 struct vmcb *vmcb = svm->vmcb;
7fcdb510
JR
3085
3086 if (!gif_set(svm) ||
3087 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3088 return 0;
3089
b518ba9f
PB
3090 if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
3091 return !!(svm->vcpu.arch.hflags & HF_HIF_MASK);
3092 else
3093 return !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
78646121
GN
3094}
3095
c9a7953f 3096static void enable_irq_window(struct kvm_vcpu *vcpu)
6aa8b732 3097{
219b65dc 3098 struct vcpu_svm *svm = to_svm(vcpu);
219b65dc 3099
e0231715
JR
3100 /*
3101 * If GIF=0 we can't rely on the CPU to tell us when GIF becomes
3102 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3103 * get that intercept, this function will be called again and
640bd6e5
JN
3104 * we'll then get the vintr intercept. However, if the vGIF feature is
3105 * enabled, the STGI interception will not occur. Enable the irq
3106 * window under the assumption that the hardware will set the GIF.
e0231715 3107 */
b518ba9f 3108 if (vgif_enabled(svm) || gif_set(svm)) {
f3515dc3
SS
3109 /*
3110 * IRQ window is not needed when AVIC is enabled,
3111 * unless we have pending ExtINT since it cannot be injected
3112 * via AVIC. In such case, we need to temporarily disable AVIC,
3113 * and fallback to injecting IRQ via V_IRQ.
3114 */
3115 svm_toggle_avic_for_irq_window(vcpu, false);
219b65dc 3116 svm_set_vintr(svm);
219b65dc 3117 }
85f455f7
ED
3118}
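/*
 * Editor's note, not part of the original file: svm_set_vintr()
 * arranges a VINTR intercept, roughly by programming a dummy virtual
 * interrupt (V_IRQ) so the CPU exits as soon as the guest can accept
 * interrupts (GIF=1, RFLAGS.IF=1, no interrupt shadow); that exit is
 * handled by interrupt_window_interception() earlier in this file,
 * which requests KVM_REQ_EVENT to inject the pending IRQ.
 */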
3119
c9a7953f 3120static void enable_nmi_window(struct kvm_vcpu *vcpu)
c1150d8c 3121{
04d2cc77 3122 struct vcpu_svm *svm = to_svm(vcpu);
c1150d8c 3123
44c11430
GN
3124 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3125 == HF_NMI_MASK)
c9a7953f 3126 return; /* IRET will cause a vm exit */
44c11430 3127
640bd6e5
JN
3128 if (!gif_set(svm)) {
3129 if (vgif_enabled(svm))
3130 set_intercept(svm, INTERCEPT_STGI);
1a5e1852 3131 return; /* STGI will cause a vm exit */
640bd6e5 3132 }
1a5e1852
LP
3133
3134 if (svm->nested.exit_required)
3135 return; /* we're not going to run the guest yet */
3136
e0231715
JR
3137 /*
3138 * Something prevents the NMI from being injected. Single-step over
3139 * the problem (IRET, exception injection, or interrupt shadow).
3140 */
ab2f4d73 3141 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
6be7d306 3142 svm->nmi_singlestep = true;
44c11430 3143 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
c1150d8c
DL
3144}
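/*
 * Editor's sketch, not part of the original file: the single-step
 * trick above plays out in three steps:
 *
 *	1. here: save RFLAGS, set TF (plus RF so the current
 *	   instruction doesn't re-fault) and mark nmi_singlestep;
 *	2. the guest executes exactly one instruction (e.g. the IRET
 *	   blocking the NMI) and takes a #DB vmexit;
 *	3. db_interception() sees nmi_singlestep, restores RFLAGS via
 *	   disable_nmi_singlestep() and requests KVM_REQ_EVENT so the
 *	   pending NMI is injected on the next entry.
 */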
3145
cbc94022
IE
3146static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3147{
3148 return 0;
3149}
3150
2ac52ab8
SC
3151static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
3152{
3153 return 0;
3154}
3155
883b0a91 3156void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
d9e368d6 3157{
38e5e92f
JR
3158 struct vcpu_svm *svm = to_svm(vcpu);
3159
3160 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3161 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3162 else
3163 svm->asid_generation--;
d9e368d6
AK
3164}
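/*
 * Editor's note, not part of the original file: two flush strategies
 * meet here. With FLUSHBYASID the next VMRUN flushes only this
 * guest's ASID; without it, decrementing asid_generation makes
 * pre_svm_run() assign a fresh ASID, so stale translations tagged
 * with the old ASID simply can no longer match (a full flush only
 * happens on ASID wraparound in new_asid()).
 */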
3165
faff8758
JS
3166static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3167{
3168 struct vcpu_svm *svm = to_svm(vcpu);
3169
3170 invlpga(gva, svm->vmcb->control.asid);
3171}
3172
04d2cc77
AK
3173static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3174{
3175}
3176
d7bf8221
JR
3177static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3178{
3179 struct vcpu_svm *svm = to_svm(vcpu);
3180
3bbf3565 3181 if (svm_nested_virtualize_tpr(vcpu))
88ab24ad
JR
3182 return;
3183
4ee546b4 3184 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
d7bf8221 3185 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
615d5193 3186 kvm_set_cr8(vcpu, cr8);
d7bf8221
JR
3187 }
3188}
3189
649d6864
JR
3190static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3191{
3192 struct vcpu_svm *svm = to_svm(vcpu);
3193 u64 cr8;
3194
3bbf3565
SS
3195 if (svm_nested_virtualize_tpr(vcpu) ||
3196 kvm_vcpu_apicv_active(vcpu))
88ab24ad
JR
3197 return;
3198
649d6864
JR
3199 cr8 = kvm_get_cr8(vcpu);
3200 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3201 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3202}
3203
9222be18
GN
3204static void svm_complete_interrupts(struct vcpu_svm *svm)
3205{
3206 u8 vector;
3207 int type;
3208 u32 exitintinfo = svm->vmcb->control.exit_int_info;
66b7138f
JK
3209 unsigned int3_injected = svm->int3_injected;
3210
3211 svm->int3_injected = 0;
9222be18 3212
bd3d1ec3
AK
3213 /*
3214 * If we've made progress since setting HF_IRET_MASK, we've
3215 * executed an IRET and can allow NMI injection.
3216 */
3217 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3218 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
44c11430 3219 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3842d135
AK
3220 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3221 }
44c11430 3222
9222be18
GN
3223 svm->vcpu.arch.nmi_injected = false;
3224 kvm_clear_exception_queue(&svm->vcpu);
3225 kvm_clear_interrupt_queue(&svm->vcpu);
3226
3227 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3228 return;
3229
3842d135
AK
3230 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3231
9222be18
GN
3232 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3233 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3234
3235 switch (type) {
3236 case SVM_EXITINTINFO_TYPE_NMI:
3237 svm->vcpu.arch.nmi_injected = true;
3238 break;
3239 case SVM_EXITINTINFO_TYPE_EXEPT:
66b7138f
JK
3240 /*
3241 * In case of software exceptions, do not reinject the vector,
3242 * but re-execute the instruction instead. Rewind RIP first
3243 * if we emulated INT3 before.
3244 */
3245 if (kvm_exception_is_soft(vector)) {
3246 if (vector == BP_VECTOR && int3_injected &&
3247 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3248 kvm_rip_write(&svm->vcpu,
3249 kvm_rip_read(&svm->vcpu) -
3250 int3_injected);
9222be18 3251 break;
66b7138f 3252 }
9222be18
GN
3253 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3254 u32 err = svm->vmcb->control.exit_int_info_err;
ce7ddec4 3255 kvm_requeue_exception_e(&svm->vcpu, vector, err);
9222be18
GN
3256
3257 } else
ce7ddec4 3258 kvm_requeue_exception(&svm->vcpu, vector);
9222be18
GN
3259 break;
3260 case SVM_EXITINTINFO_TYPE_INTR:
66fd3f7f 3261 kvm_queue_interrupt(&svm->vcpu, vector, false);
9222be18
GN
3262 break;
3263 default:
3264 break;
3265 }
3266}
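/*
 * Editor's note, not part of the original file: the BP_VECTOR rewind
 * above exists because software exceptions are re-executed rather
 * than reinjected; if KVM had already advanced RIP past an emulated
 * INT3 (by int3_injected bytes), RIP must be moved back first or the
 * guest would skip an instruction.
 */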
3267
b463a6f7
AK
3268static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3269{
3270 struct vcpu_svm *svm = to_svm(vcpu);
3271 struct vmcb_control_area *control = &svm->vmcb->control;
3272
3273 control->exit_int_info = control->event_inj;
3274 control->exit_int_info_err = control->event_inj_err;
3275 control->event_inj = 0;
3276 svm_complete_interrupts(svm);
3277}
3278
56a87e5d 3279void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
199cd1d7 3280
851ba692 3281static void svm_vcpu_run(struct kvm_vcpu *vcpu)
6aa8b732 3282{
a2fa3e9f 3283 struct vcpu_svm *svm = to_svm(vcpu);
d9e368d6 3284
2041a06a
JR
3285 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3286 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3287 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3288
cd3ff653
JR
3289 /*
3290 * A vmexit emulation is required before the vcpu can be executed
3291 * again.
3292 */
3293 if (unlikely(svm->nested.exit_required))
3294 return;
3295
a12713c2
LP
3296 /*
3297 * Disable singlestep if we're injecting an interrupt/exception.
3298 * We don't want our modified rflags to be pushed on the stack where
3299 * we might not be able to easily reset them if we disabled NMI
3300 * singlestep later.
3301 */
3302 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3303 /*
3304 * Event injection happens before external interrupts cause a
3305 * vmexit and interrupts are disabled here, so smp_send_reschedule
3306 * is enough to force an immediate vmexit.
3307 */
3308 disable_nmi_singlestep(svm);
3309 smp_send_reschedule(vcpu->cpu);
3310 }
3311
e756fc62 3312 pre_svm_run(svm);
6aa8b732 3313
649d6864
JR
3314 sync_lapic_to_cr8(vcpu);
3315
cda0ffdd 3316 svm->vmcb->save.cr2 = vcpu->arch.cr2;
6aa8b732 3317
04d2cc77 3318 clgi();
139a12cf 3319 kvm_load_guest_xsave_state(vcpu);
04d2cc77 3320
b6c4bc65
WL
3321 if (lapic_in_kernel(vcpu) &&
3322 vcpu->arch.apic->lapic_timer.timer_advance_ns)
3323 kvm_wait_lapic_expire(vcpu);
3324
b2ac58f9
KA
3325 /*
3326 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3327 * it's non-zero. Since vmentry is serialising on affected CPUs, there
3328 * is no need to worry about the conditional branch over the wrmsr
3329 * being speculatively taken.
3330 */
ccbcd267 3331 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
b2ac58f9 3332
199cd1d7 3333 __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
6aa8b732 3334
15e6c22f
TG
3335#ifdef CONFIG_X86_64
3336 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3337#else
3338 loadsegment(fs, svm->host.fs);
3339#ifndef CONFIG_X86_32_LAZY_GS
3340 loadsegment(gs, svm->host.gs);
3341#endif
3342#endif
3343
b2ac58f9
KA
3344 /*
3345 * We do not use IBRS in the kernel. If this vCPU has used the
3346 * SPEC_CTRL MSR it may have left it on; save the value and
3347 * turn it off. This is much more efficient than blindly adding
3348 * it to the atomic save/restore list. Especially as the former
3349 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
3350 *
3351 * For non-nested case:
3352 * If the L01 MSR bitmap does not intercept the MSR, then we need to
3353 * save it.
3354 *
3355 * For nested case:
3356 * If the L02 MSR bitmap does not intercept the MSR, then we need to
3357 * save it.
3358 */
946fbbc1 3359 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
ecb586bd 3360 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
b2ac58f9 3361
6aa8b732
AK
3362 reload_tss(vcpu);
3363
024d83ca
TG
3364 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
3365
13c34e07
AK
3366 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3367 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3368 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3369 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3370
3781c01c 3371 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
dd60d217 3372 kvm_before_interrupt(&svm->vcpu);
3781c01c 3373
139a12cf 3374 kvm_load_host_xsave_state(vcpu);
3781c01c
JR
3375 stgi();
3376
3377 /* Any pending NMI will happen here */
3378
3379 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
dd60d217 3380 kvm_after_interrupt(&svm->vcpu);
3781c01c 3381
d7bf8221
JR
3382 sync_cr8_to_lapic(vcpu);
3383
a2fa3e9f 3384 svm->next_rip = 0;
9222be18 3385
38e5e92f
JR
3386 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3387
631bc487
GN
3388 /* if exit due to PF check for async PF */
3389 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
1261bfa3 3390 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
631bc487 3391
6de4f3ad
AK
3392 if (npt_enabled) {
3393 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3394 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3395 }
fe5913e4
JR
3396
3397 /*
3398 * We need to handle MC intercepts here before the vcpu has a chance to
3399 * change the physical cpu
3400 */
3401 if (unlikely(svm->vmcb->control.exit_code ==
3402 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3403 svm_handle_mce(svm);
8d28fec4
RJ
3404
3405 mark_all_clean(svm->vmcb);
6aa8b732
AK
3406}
3407
727a7e27 3408static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
6aa8b732 3409{
a2fa3e9f 3410 struct vcpu_svm *svm = to_svm(vcpu);
689f3bf2
PB
3411 bool update_guest_cr3 = true;
3412 unsigned long cr3;
a2fa3e9f 3413
689f3bf2
PB
3414 cr3 = __sme_set(root);
3415 if (npt_enabled) {
3416 svm->vmcb->control.nested_cr3 = cr3;
3417 mark_dirty(svm->vmcb, VMCB_NPT);
1c97f0a0 3418
689f3bf2
PB
3419 /* Loading L2's CR3 is handled by enter_svm_guest_mode. */
3420 if (is_guest_mode(vcpu))
3421 update_guest_cr3 = false;
3422 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3423 cr3 = vcpu->arch.cr3;
3424 else /* CR3 is already up-to-date. */
3425 update_guest_cr3 = false;
3426 }
1c97f0a0 3427
689f3bf2
PB
3428 if (update_guest_cr3) {
3429 svm->vmcb->save.cr3 = cr3;
3430 mark_dirty(svm->vmcb, VMCB_CR);
3431 }
1c97f0a0
JR
3432}
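/*
 * Summary of the cases above (illustrative): with NPT enabled the new
 * root always goes into nested_cr3, while save.cr3 is rewritten only
 * when we are not in guest mode (enter_svm_guest_mode loads L2's CR3)
 * and VCPU_EXREG_CR3 is marked available, i.e. vcpu->arch.cr3 is
 * actually current.  Without NPT, save.cr3 always takes the new root.
 */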
3433
6aa8b732
AK
3434static int is_disabled(void)
3435{
6031a61c
JR
3436 u64 vm_cr;
3437
3438 rdmsrl(MSR_VM_CR, vm_cr);
3439 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3440 return 1;
3441
6aa8b732
AK
3442 return 0;
3443}
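/*
 * Illustrative note: VM_CR.SVMDIS (bit 4, tested above) is typically
 * set, and often locked, by the BIOS when SVM is disabled in firmware
 * setup; with it set, attempts to enable EFER.SVME #GP, so KVM must
 * refuse to load here rather than fault later.
 */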
3444
102d8325
IM
3445static void
3446svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3447{
3448 /*
3449 * Patch in the VMMCALL instruction:
3450 */
3451 hypercall[0] = 0x0f;
3452 hypercall[1] = 0x01;
3453 hypercall[2] = 0xd9;
102d8325
IM
3454}
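/*
 * A minimal guest-side sketch (hypothetical, not part of this file) of
 * what runs after the patch above: the guest's hypercall stub executes
 * the 0f 01 d9 (VMMCALL) bytes written here.  KVM's hypercall ABI puts
 * the hypercall number in RAX and returns the result in RAX:
 */
static inline long example_vmmcall0(unsigned int nr)
{
	long ret;

	asm volatile("vmmcall"
		     : "=a" (ret)
		     : "a" (nr)
		     : "memory");
	return ret;
}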
3455
f257d6dc 3456static int __init svm_check_processor_compat(void)
002c7f7c 3457{
f257d6dc 3458 return 0;
002c7f7c
YS
3459}
3460
774ead3a
AK
3461static bool svm_cpu_has_accelerated_tpr(void)
3462{
3463 return false;
3464}
3465
bc226f07 3466static bool svm_has_emulated_msr(int index)
6d396b55 3467{
e87555e5
VK
3468 switch (index) {
3469 case MSR_IA32_MCG_EXT_CTL:
95c5c7c7 3470 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
e87555e5
VK
3471 return false;
3472 default:
3473 break;
3474 }
3475
6d396b55
PB
3476 return true;
3477}
3478
fc07e76a
PB
3479static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3480{
3481 return 0;
3482}
3483
0e851880
SY
3484static void svm_cpuid_update(struct kvm_vcpu *vcpu)
3485{
6092d3d3
JR
3486 struct vcpu_svm *svm = to_svm(vcpu);
3487
7204160e 3488 vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
96be4e06 3489 boot_cpu_has(X86_FEATURE_XSAVE) &&
7204160e
AL
3490 boot_cpu_has(X86_FEATURE_XSAVES);
3491
6092d3d3 3492 /* Update nrips enabled cache */
4eb87460
SC
3493 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
3494 guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
46781eae
SS
3495
3496 if (!kvm_vcpu_apicv_active(vcpu))
3497 return;
3498
cc7f5577
OU
3499 /*
3500 * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
3501 * is exposed to the guest, disable AVIC.
3502 */
3503 if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
3504 kvm_request_apicv_update(vcpu->kvm, false,
3505 APICV_INHIBIT_REASON_X2APIC);
9a0bf054
SS
3506
3507 /*
3508 * Currently, AVIC does not work with nested virtualization.
3509 * So, we disable AVIC when the SVM CPUID feature is exposed to the L1 guest.
3510 */
3511 if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
3512 kvm_request_apicv_update(vcpu->kvm, false,
3513 APICV_INHIBIT_REASON_NESTED);
0e851880
SY
3514}
3515
f5f48ee1
SY
3516static bool svm_has_wbinvd_exit(void)
3517{
3518 return true;
3519}
3520
8061252e 3521#define PRE_EX(exit) { .exit_code = (exit), \
40e19b51 3522 .stage = X86_ICPT_PRE_EXCEPT, }
cfec82cb 3523#define POST_EX(exit) { .exit_code = (exit), \
40e19b51 3524 .stage = X86_ICPT_POST_EXCEPT, }
d7eb8203 3525#define POST_MEM(exit) { .exit_code = (exit), \
40e19b51 3526 .stage = X86_ICPT_POST_MEMACCESS, }
cfec82cb 3527
09941fbb 3528static const struct __x86_intercept {
cfec82cb
JR
3529 u32 exit_code;
3530 enum x86_intercept_stage stage;
cfec82cb
JR
3531} x86_intercept_map[] = {
3532 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
3533 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
3534 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
3535 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
3536 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
3b88e41a
JR
3537 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
3538 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
dee6bb70
JR
3539 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
3540 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
3541 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
3542 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
3543 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
3544 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
3545 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
3546 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
01de8b09
JR
3547 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
3548 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
3549 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
3550 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
3551 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
3552 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
3553 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
3554 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
d7eb8203
JR
3555 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
3556 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
3557 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
8061252e
JR
3558 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
3559 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
3560 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
3561 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
3562 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
3563 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
3564 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
3565 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
3566 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
bf608f88
JR
3567 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
3568 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
3569 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
3570 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
3571 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
3572 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
3573 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
f6511935
JR
3574 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
3575 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
3576 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
3577 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
02d4160f 3578 [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV),
cfec82cb
JR
3579};
3580
8061252e 3581#undef PRE_EX
cfec82cb 3582#undef POST_EX
d7eb8203 3583#undef POST_MEM
cfec82cb 3584
8a76d7f2
JR
3585static int svm_check_intercept(struct kvm_vcpu *vcpu,
3586 struct x86_instruction_info *info,
21f1b8f2
SC
3587 enum x86_intercept_stage stage,
3588 struct x86_exception *exception)
8a76d7f2 3589{
cfec82cb
JR
3590 struct vcpu_svm *svm = to_svm(vcpu);
3591 int vmexit, ret = X86EMUL_CONTINUE;
3592 struct __x86_intercept icpt_info;
3593 struct vmcb *vmcb = svm->vmcb;
3594
3595 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
3596 goto out;
3597
3598 icpt_info = x86_intercept_map[info->intercept];
3599
40e19b51 3600 if (stage != icpt_info.stage)
cfec82cb
JR
3601 goto out;
3602
3603 switch (icpt_info.exit_code) {
3604 case SVM_EXIT_READ_CR0:
3605 if (info->intercept == x86_intercept_cr_read)
3606 icpt_info.exit_code += info->modrm_reg;
3607 break;
3608 case SVM_EXIT_WRITE_CR0: {
3609 unsigned long cr0, val;
3610 u64 intercept;
3611
3612 if (info->intercept == x86_intercept_cr_write)
3613 icpt_info.exit_code += info->modrm_reg;
3614
62baf44c
JK
3615 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
3616 info->intercept == x86_intercept_clts)
cfec82cb
JR
3617 break;
3618
3619 intercept = svm->nested.intercept;
3620
3621 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
3622 break;
3623
3624 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
3625 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
3626
3627 if (info->intercept == x86_intercept_lmsw) {
3628 cr0 &= 0xfUL;
3629 val &= 0xfUL;
3630 /* lmsw can't clear PE - catch this here */
3631 if (cr0 & X86_CR0_PE)
3632 val |= X86_CR0_PE;
3633 }
3634
3635 if (cr0 ^ val)
3636 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3637
3638 break;
3639 }
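 /*
  * Worked example for the lmsw special case above, assuming
  * SVM_CR0_SELECTIVE_MASK covers TS and MP so that only PE and EM
  * survive in the low nibble: with guest CR0.PE=1 and CR0.EM=0,
  * cr0 == 0x1.  "lmsw 0x0" yields val = 0x0 | X86_CR0_PE = 0x1, so
  * cr0 ^ val == 0 and no selective-CR0 exit is synthesized.
  * "lmsw 0x5" yields val = 0x5 (EM would change), cr0 ^ val == 0x4,
  * so the exit code becomes SVM_EXIT_CR0_SEL_WRITE.
  */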
3b88e41a
JR
3640 case SVM_EXIT_READ_DR0:
3641 case SVM_EXIT_WRITE_DR0:
3642 icpt_info.exit_code += info->modrm_reg;
3643 break;
8061252e
JR
3644 case SVM_EXIT_MSR:
3645 if (info->intercept == x86_intercept_wrmsr)
3646 vmcb->control.exit_info_1 = 1;
3647 else
3648 vmcb->control.exit_info_1 = 0;
3649 break;
bf608f88
JR
3650 case SVM_EXIT_PAUSE:
3651 /*
3652 * We only get this intercept for NOP: PAUSE is encoded
3653 * as REP NOP, so check for the REP prefix here.
3654 */
3655 if (info->rep_prefix != REPE_PREFIX)
3656 goto out;
49a8afca 3657 break;
f6511935
JR
3658 case SVM_EXIT_IOIO: {
3659 u64 exit_info;
3660 u32 bytes;
3661
f6511935
JR
3662 if (info->intercept == x86_intercept_in ||
3663 info->intercept == x86_intercept_ins) {
6cbc5f5a
JK
3664 exit_info = ((info->src_val & 0xffff) << 16) |
3665 SVM_IOIO_TYPE_MASK;
f6511935 3666 bytes = info->dst_bytes;
6493f157 3667 } else {
6cbc5f5a 3668 exit_info = (info->dst_val & 0xffff) << 16;
6493f157 3669 bytes = info->src_bytes;
f6511935
JR
3670 }
3671
3672 if (info->intercept == x86_intercept_outs ||
3673 info->intercept == x86_intercept_ins)
3674 exit_info |= SVM_IOIO_STR_MASK;
3675
3676 if (info->rep_prefix)
3677 exit_info |= SVM_IOIO_REP_MASK;
3678
3679 bytes = min(bytes, 4u);
3680
3681 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
3682
3683 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
3684
3685 vmcb->control.exit_info_1 = exit_info;
3686 vmcb->control.exit_info_2 = info->next_rip;
3687
3688 break;
3689 }
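 /*
  * Worked example for the encoding above: "rep outsb" to port 0x3f8
  * with a 16-bit address size builds
  *
  *	exit_info_1 = (0x3f8 << 16)			port
  *		    | SVM_IOIO_STR_MASK			string insn
  *		    | SVM_IOIO_REP_MASK			rep prefix
  *		    | (1 << SVM_IOIO_SIZE_SHIFT)	1-byte data
  *		    | (2 << (SVM_IOIO_ASIZE_SHIFT - 1))	16-bit addr
  *
  * which is 0x03f8009c assuming the usual STR/REP shifts of 2 and 3.
  */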
cfec82cb
JR
3690 default:
3691 break;
3692 }
3693
f104765b
BD
3694 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
3695 if (static_cpu_has(X86_FEATURE_NRIPS))
3696 vmcb->control.next_rip = info->next_rip;
cfec82cb
JR
3697 vmcb->control.exit_code = icpt_info.exit_code;
3698 vmexit = nested_svm_exit_handled(svm);
3699
3700 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
3701 : X86EMUL_CONTINUE;
3702
3703out:
3704 return ret;
8a76d7f2
JR
3705}
3706
1e9e2622
WL
3707static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
3708 enum exit_fastpath_completion *exit_fastpath)
a547c6db 3709{
1e9e2622 3710 if (!is_guest_mode(vcpu) &&
aaca2100
HL
3711 to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
3712 to_svm(vcpu)->vmcb->control.exit_info_1)
1e9e2622 3713 *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
a547c6db
YZ
3714}
3715
ae97a3b8
RK
3716static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
3717{
8566ac8b
BM
3718 if (pause_filter_thresh)
3719 shrink_ple_window(vcpu);
ae97a3b8
RK
3720}
3721
74f16909
BP
3722static void svm_setup_mce(struct kvm_vcpu *vcpu)
3723{
3724 /* [63:9] are reserved. */
3725 vcpu->arch.mcg_cap &= 0x1ff;
3726}
3727
72d7b374
LP
3728static int svm_smi_allowed(struct kvm_vcpu *vcpu)
3729{
05cade71
LP
3730 struct vcpu_svm *svm = to_svm(vcpu);
3731
3732 /* Per APM Vol.2 15.22.2 "Response to SMI" */
3733 if (!gif_set(svm))
3734 return 0;
3735
3736 if (is_guest_mode(&svm->vcpu) &&
3737 svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
3738 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
3739 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
3740 svm->nested.exit_required = true;
3741 return 0;
3742 }
3743
72d7b374
LP
3744 return 1;
3745}
3746
0234bf88
LP
3747static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
3748{
05cade71
LP
3749 struct vcpu_svm *svm = to_svm(vcpu);
3750 int ret;
3751
3752 if (is_guest_mode(vcpu)) {
3753 /* FED8h - SVM Guest */
3754 put_smstate(u64, smstate, 0x7ed8, 1);
3755 /* FEE0h - SVM Guest VMCB Physical Address */
3756 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
3757
3758 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3759 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3760 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3761
3762 ret = nested_svm_vmexit(svm);
3763 if (ret)
3764 return ret;
3765 }
0234bf88
LP
3766 return 0;
3767}
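/*
 * Note on the offsets above, assuming the smstate buffer mirrors the
 * top of the SMRAM state-save area: buffer offset 0x7ed8 corresponds
 * to the APM's FED8h "SVM Guest" field and 0x7ee0 to FEE0h, the SVM
 * guest VMCB physical address.
 */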
3768
ed19321f 3769static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
0234bf88 3770{
05cade71
LP
3771 struct vcpu_svm *svm = to_svm(vcpu);
3772 struct vmcb *nested_vmcb;
8c5fbf1a 3773 struct kvm_host_map map;
ed19321f
SC
3774 u64 guest;
3775 u64 vmcb;
05cade71 3776
ed19321f
SC
3777 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
3778 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
05cade71 3779
ed19321f 3780 if (guest) {
8c5fbf1a 3781 if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
9ec19493 3782 return 1;
8c5fbf1a
KA
3783 nested_vmcb = map.hva;
3784 enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
05cade71 3785 }
9ec19493 3786 return 0;
0234bf88
LP
3787}
3788
cc3d967f
LP
3789static int enable_smi_window(struct kvm_vcpu *vcpu)
3790{
3791 struct vcpu_svm *svm = to_svm(vcpu);
3792
3793 if (!gif_set(svm)) {
3794 if (vgif_enabled(svm))
3795 set_intercept(svm, INTERCEPT_STGI);
3796 /* STGI will cause a vm exit */
3797 return 1;
3798 }
3799 return 0;
3800}
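/*
 * Illustrative note: while GIF is clear the CPU holds SMIs pending,
 * so intercepting STGI (possible only with vGIF) is what returns
 * control to KVM the moment the guest re-enables GIF, allowing the
 * pending SMI to be injected then.
 */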
3801
05d5a486
SB
3802static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
3803{
118154bd
LA
3804 unsigned long cr4 = kvm_read_cr4(vcpu);
3805 bool smep = cr4 & X86_CR4_SMEP;
3806 bool smap = cr4 & X86_CR4_SMAP;
3807 bool is_user = svm_get_cpl(vcpu) == 3;
05d5a486
SB
3808
3809 /*
118154bd
LA
3810 * Detect and work around Erratum 1096 Fam_17h_00_0Fh.
3811 *
3812 * Erratum:
3813 * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
3814 * it is possible that the CPU microcode implementing DecodeAssist will
3815 * fail to read the bytes of the instruction which caused the #NPF.
3816 * In this case, the GuestIntrBytes field of the VMCB on a VMEXIT will
3817 * incorrectly return 0 instead of the correct guest instruction bytes.
3818 *
3819 * This happens because the CPU microcode that reads the instruction
3820 * bytes uses a special opcode which attempts to read data with CPL=0
3821 * privileges. The microcode reads CS:RIP and, if it hits an SMAP
3822 * fault, gives up and returns no instruction bytes.
3823 *
3824 * Detection:
3825 * We reach here when the CPU supports DecodeAssist, raised #NPF, and
3826 * returned 0 in the GuestIntrBytes field of the VMCB.
3827 * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
3828 * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered
3829 * when vCPU CPL==3 (because otherwise the guest would have taken
3830 * an SMEP fault instead of #NPF).
3831 * Otherwise (vCPU CR4.SMEP=0), the erratum can be triggered at any CPL.
3832 * As most guests enable SMAP only if they have also enabled SMEP, use
3833 * the above logic to minimize false positives when detecting the
3834 * erratum while still preserving semantic correctness in all cases.
3835 *
3836 * Workaround:
3837 * To determine what instruction the guest was executing, the hypervisor
3838 * will have to decode the instruction at the instruction pointer.
05d5a486
SB
3839 *
3840 * In a non-SEV guest, the hypervisor can read guest memory to decode
3841 * the instruction at the instruction pointer when insn_len is zero,
3842 * so we return true to indicate that decoding is possible.
3843 *
3844 * But in an SEV guest, guest memory is encrypted with a guest-specific
3845 * key and the hypervisor cannot read it to decode the instruction, so
3846 * the erratum cannot be worked around. Print an error and request that
3847 * the guest be killed.
3848 */
118154bd 3849 if (smap && (!smep || is_user)) {
05d5a486
SB
3850 if (!sev_guest(vcpu->kvm))
3851 return true;
3852
118154bd 3853 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
05d5a486
SB
3854 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3855 }
3856
3857 return false;
3858}
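/*
 * Decision table for the check above, derived from the logic (for
 * illustration):
 *
 *	CR4.SMAP  CR4.SMEP  CPL==3	suspect erratum?
 *	   0         x        x		no  (needs SMAP=1)
 *	   1         0        x		yes
 *	   1         1        no	no  (SMEP fault, not #NPF)
 *	   1         1        yes	yes
 */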
3859
4b9852f4
LA
3860static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
3861{
3862 struct vcpu_svm *svm = to_svm(vcpu);
3863
3864 /*
3865 * TODO: The last condition latches INIT signals on the vCPU when
3866 * the vCPU is in guest mode and vmcb12 defines an intercept on INIT.
3867 * To properly emulate the INIT intercept, SVM should implement
afaf0b2f 3868 * kvm_x86_ops.check_nested_events() and call nested_svm_vmexit()
4b9852f4
LA
3869 * there if an INIT signal is pending.
3870 */
3871 return !gif_set(svm) ||
3872 (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
3873}
3874
eaf78265
JR
3875static void svm_vm_destroy(struct kvm *kvm)
3876{
3877 avic_vm_destroy(kvm);
3878 sev_vm_destroy(kvm);
3879}
3880
3881static int svm_vm_init(struct kvm *kvm)
3882{
3883 if (avic) {
3884 int ret = avic_vm_init(kvm);
3885 if (ret)
3886 return ret;
3887 }
3888
3889 kvm_apicv_init(kvm, avic);
3890 return 0;
3891}
3892
9c14ee21 3893static struct kvm_x86_ops svm_x86_ops __initdata = {
dd58f3c9 3894 .hardware_unsetup = svm_hardware_teardown,
6aa8b732
AK
3895 .hardware_enable = svm_hardware_enable,
3896 .hardware_disable = svm_hardware_disable,
774ead3a 3897 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
bc226f07 3898 .has_emulated_msr = svm_has_emulated_msr,
6aa8b732
AK
3899
3900 .vcpu_create = svm_create_vcpu,
3901 .vcpu_free = svm_free_vcpu,
04d2cc77 3902 .vcpu_reset = svm_vcpu_reset,
6aa8b732 3903
562b6b08 3904 .vm_size = sizeof(struct kvm_svm),
4e19c36f 3905 .vm_init = svm_vm_init,
1654efcb 3906 .vm_destroy = svm_vm_destroy,
44a95dae 3907
04d2cc77 3908 .prepare_guest_switch = svm_prepare_guest_switch,
6aa8b732
AK
3909 .vcpu_load = svm_vcpu_load,
3910 .vcpu_put = svm_vcpu_put,
8221c137
SS
3911 .vcpu_blocking = svm_vcpu_blocking,
3912 .vcpu_unblocking = svm_vcpu_unblocking,
6aa8b732 3913
a96036b8 3914 .update_bp_intercept = update_bp_intercept,
801e459a 3915 .get_msr_feature = svm_get_msr_feature,
6aa8b732
AK
3916 .get_msr = svm_get_msr,
3917 .set_msr = svm_set_msr,
3918 .get_segment_base = svm_get_segment_base,
3919 .get_segment = svm_get_segment,
3920 .set_segment = svm_set_segment,
2e4d2653 3921 .get_cpl = svm_get_cpl,
1747fb71 3922 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
e8467fda 3923 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
25c4c276 3924 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
6aa8b732 3925 .set_cr0 = svm_set_cr0,
6aa8b732
AK
3926 .set_cr4 = svm_set_cr4,
3927 .set_efer = svm_set_efer,
3928 .get_idt = svm_get_idt,
3929 .set_idt = svm_set_idt,
3930 .get_gdt = svm_get_gdt,
3931 .set_gdt = svm_set_gdt,
73aaf249
JK
3932 .get_dr6 = svm_get_dr6,
3933 .set_dr6 = svm_set_dr6,
020df079 3934 .set_dr7 = svm_set_dr7,
facb0139 3935 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
6de4f3ad 3936 .cache_reg = svm_cache_reg,
6aa8b732
AK
3937 .get_rflags = svm_get_rflags,
3938 .set_rflags = svm_set_rflags,
be94f6b7 3939
6aa8b732 3940 .tlb_flush = svm_flush_tlb,
faff8758 3941 .tlb_flush_gva = svm_flush_tlb_gva,
6aa8b732 3942
6aa8b732 3943 .run = svm_vcpu_run,
04d2cc77 3944 .handle_exit = handle_exit,
6aa8b732 3945 .skip_emulated_instruction = skip_emulated_instruction,
5ef8acbd 3946 .update_emulated_instruction = NULL,
2809f5d2
GC
3947 .set_interrupt_shadow = svm_set_interrupt_shadow,
3948 .get_interrupt_shadow = svm_get_interrupt_shadow,
102d8325 3949 .patch_hypercall = svm_patch_hypercall,
2a8067f1 3950 .set_irq = svm_set_irq,
95ba8273 3951 .set_nmi = svm_inject_nmi,
298101da 3952 .queue_exception = svm_queue_exception,
b463a6f7 3953 .cancel_injection = svm_cancel_injection,
78646121 3954 .interrupt_allowed = svm_interrupt_allowed,
95ba8273 3955 .nmi_allowed = svm_nmi_allowed,
3cfc3092
JK
3956 .get_nmi_mask = svm_get_nmi_mask,
3957 .set_nmi_mask = svm_set_nmi_mask,
95ba8273
GN
3958 .enable_nmi_window = enable_nmi_window,
3959 .enable_irq_window = enable_irq_window,
3960 .update_cr8_intercept = update_cr8_intercept,
8d860bbe 3961 .set_virtual_apic_mode = svm_set_virtual_apic_mode,
d62caabb 3962 .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
ef8efd7a 3963 .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
2de9d0cc 3964 .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
c7c9c56c 3965 .load_eoi_exitmap = svm_load_eoi_exitmap,
44a95dae
SS
3966 .hwapic_irr_update = svm_hwapic_irr_update,
3967 .hwapic_isr_update = svm_hwapic_isr_update,
fa59cc00 3968 .sync_pir_to_irr = kvm_lapic_find_highest_irr,
be8ca170 3969 .apicv_post_state_restore = avic_post_state_restore,
cbc94022
IE
3970
3971 .set_tss_addr = svm_set_tss_addr,
2ac52ab8 3972 .set_identity_map_addr = svm_set_identity_map_addr,
67253af5 3973 .get_tdp_level = get_npt_level,
4b12f0de 3974 .get_mt_mask = svm_get_mt_mask,
229456fc 3975
586f9607 3976 .get_exit_info = svm_get_exit_info,
586f9607 3977
0e851880 3978 .cpuid_update = svm_cpuid_update,
4e47c7a6 3979
f5f48ee1 3980 .has_wbinvd_exit = svm_has_wbinvd_exit,
99e3e30a 3981
e79f245d 3982 .read_l1_tsc_offset = svm_read_l1_tsc_offset,
326e7425 3983 .write_l1_tsc_offset = svm_write_l1_tsc_offset,
1c97f0a0 3984
727a7e27 3985 .load_mmu_pgd = svm_load_mmu_pgd,
8a76d7f2
JR
3986
3987 .check_intercept = svm_check_intercept,
95b5a48c 3988 .handle_exit_irqoff = svm_handle_exit_irqoff,
ae97a3b8 3989
d264ee0c
SC
3990 .request_immediate_exit = __kvm_request_immediate_exit,
3991
ae97a3b8 3992 .sched_in = svm_sched_in,
25462f7f
WH
3993
3994 .pmu_ops = &amd_pmu_ops,
340d3bc3 3995 .deliver_posted_interrupt = svm_deliver_avic_intr,
17e433b5 3996 .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
411b44ba 3997 .update_pi_irte = svm_update_pi_irte,
74f16909 3998 .setup_mce = svm_setup_mce,
0234bf88 3999
72d7b374 4000 .smi_allowed = svm_smi_allowed,
0234bf88
LP
4001 .pre_enter_smm = svm_pre_enter_smm,
4002 .pre_leave_smm = svm_pre_leave_smm,
cc3d967f 4003 .enable_smi_window = enable_smi_window,
1654efcb
BS
4004
4005 .mem_enc_op = svm_mem_enc_op,
1e80fdc0
BS
4006 .mem_enc_reg_region = svm_register_enc_region,
4007 .mem_enc_unreg_region = svm_unregister_enc_region,
57b119da 4008
956e255c 4009 .nested_enable_evmcs = NULL,
ea152987 4010 .nested_get_evmcs_version = NULL,
05d5a486
SB
4011
4012 .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
4b9852f4
LA
4013
4014 .apic_init_signal_blocked = svm_apic_init_signal_blocked,
b518ba9f
PB
4015
4016 .check_nested_events = svm_check_nested_events,
6aa8b732
AK
4017};
4018
d008dfdb
SC
4019static struct kvm_x86_init_ops svm_init_ops __initdata = {
4020 .cpu_has_kvm_support = has_svm,
4021 .disabled_by_bios = is_disabled,
4022 .hardware_setup = svm_hardware_setup,
4023 .check_processor_compatibility = svm_check_processor_compat,
4024
4025 .runtime_ops = &svm_x86_ops,
6aa8b732
AK
4026};
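/*
 * A rough sketch of how these ops are consumed at module load (the
 * generic code lives outside this file): kvm_init() passes
 * svm_init_ops through to the arch setup code, which checks
 * cpu_has_kvm_support() and disabled_by_bios(), runs
 * svm_hardware_setup(), and copies runtime_ops into the global
 * kvm_x86_ops used for every subsequent callback.
 */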
4027
4028static int __init svm_init(void)
4029{
d008dfdb 4030 return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
0ee75bea 4031 __alignof__(struct vcpu_svm), THIS_MODULE);
6aa8b732
AK
4032}
4033
4034static void __exit svm_exit(void)
4035{
cb498ea2 4036 kvm_exit();
6aa8b732
AK
4037}
4038
4039module_init(svm_init)
4040module_exit(svm_exit)