1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 #include <linux/kvm_host.h>
7 #include "kvm_cache_regs.h"
13 #include <linux/module.h>
14 #include <linux/mod_devicetable.h>
15 #include <linux/kernel.h>
16 #include <linux/vmalloc.h>
17 #include <linux/highmem.h>
18 #include <linux/amd-iommu.h>
19 #include <linux/sched.h>
20 #include <linux/trace_events.h>
21 #include <linux/slab.h>
22 #include <linux/hashtable.h>
23 #include <linux/objtool.h>
24 #include <linux/psp-sev.h>
25 #include <linux/file.h>
26 #include <linux/pagemap.h>
27 #include <linux/swap.h>
28 #include <linux/rwsem.h>
29 #include <linux/cc_platform.h>
30 #include <linux/smp.h>
33 #include <asm/perf_event.h>
34 #include <asm/tlbflush.h>
36 #include <asm/debugreg.h>
37 #include <asm/kvm_para.h>
38 #include <asm/irq_remapping.h>
39 #include <asm/spec-ctrl.h>
40 #include <asm/cpu_device_id.h>
41 #include <asm/traps.h>
42 #include <asm/reboot.h>
43 #include <asm/fpu/api.h>
45 #include <trace/events/ipi.h>
52 #include "kvm_onhyperv.h"
53 #include "svm_onhyperv.h"
55 MODULE_AUTHOR("Qumranet");
56 MODULE_LICENSE("GPL");
59 static const struct x86_cpu_id svm_cpu_id
[] = {
60 X86_MATCH_FEATURE(X86_FEATURE_SVM
, NULL
),
63 MODULE_DEVICE_TABLE(x86cpu
, svm_cpu_id
);
66 #define SEG_TYPE_LDT 2
67 #define SEG_TYPE_BUSY_TSS16 3
69 static bool erratum_383_found __read_mostly
;
71 u32 msrpm_offsets
[MSRPM_OFFSETS
] __read_mostly
;
74 * Set osvw_len to higher value when updated Revision Guides
75 * are published and we know what the new status bits are
77 static uint64_t osvw_len
= 4, osvw_status
;
79 static DEFINE_PER_CPU(u64
, current_tsc_ratio
);
81 #define X2APIC_MSR(x) (APIC_BASE_MSR + (x >> 4))
83 static const struct svm_direct_access_msrs
{
84 u32 index
; /* Index of the MSR */
85 bool always
; /* True if intercept is initially cleared */
86 } direct_access_msrs
[MAX_DIRECT_ACCESS_MSRS
] = {
87 { .index
= MSR_STAR
, .always
= true },
88 { .index
= MSR_IA32_SYSENTER_CS
, .always
= true },
89 { .index
= MSR_IA32_SYSENTER_EIP
, .always
= false },
90 { .index
= MSR_IA32_SYSENTER_ESP
, .always
= false },
92 { .index
= MSR_GS_BASE
, .always
= true },
93 { .index
= MSR_FS_BASE
, .always
= true },
94 { .index
= MSR_KERNEL_GS_BASE
, .always
= true },
95 { .index
= MSR_LSTAR
, .always
= true },
96 { .index
= MSR_CSTAR
, .always
= true },
97 { .index
= MSR_SYSCALL_MASK
, .always
= true },
99 { .index
= MSR_IA32_SPEC_CTRL
, .always
= false },
100 { .index
= MSR_IA32_PRED_CMD
, .always
= false },
101 { .index
= MSR_IA32_FLUSH_CMD
, .always
= false },
102 { .index
= MSR_IA32_LASTBRANCHFROMIP
, .always
= false },
103 { .index
= MSR_IA32_LASTBRANCHTOIP
, .always
= false },
104 { .index
= MSR_IA32_LASTINTFROMIP
, .always
= false },
105 { .index
= MSR_IA32_LASTINTTOIP
, .always
= false },
106 { .index
= MSR_IA32_XSS
, .always
= false },
107 { .index
= MSR_EFER
, .always
= false },
108 { .index
= MSR_IA32_CR_PAT
, .always
= false },
109 { .index
= MSR_AMD64_SEV_ES_GHCB
, .always
= true },
110 { .index
= MSR_TSC_AUX
, .always
= false },
111 { .index
= X2APIC_MSR(APIC_ID
), .always
= false },
112 { .index
= X2APIC_MSR(APIC_LVR
), .always
= false },
113 { .index
= X2APIC_MSR(APIC_TASKPRI
), .always
= false },
114 { .index
= X2APIC_MSR(APIC_ARBPRI
), .always
= false },
115 { .index
= X2APIC_MSR(APIC_PROCPRI
), .always
= false },
116 { .index
= X2APIC_MSR(APIC_EOI
), .always
= false },
117 { .index
= X2APIC_MSR(APIC_RRR
), .always
= false },
118 { .index
= X2APIC_MSR(APIC_LDR
), .always
= false },
119 { .index
= X2APIC_MSR(APIC_DFR
), .always
= false },
120 { .index
= X2APIC_MSR(APIC_SPIV
), .always
= false },
121 { .index
= X2APIC_MSR(APIC_ISR
), .always
= false },
122 { .index
= X2APIC_MSR(APIC_TMR
), .always
= false },
123 { .index
= X2APIC_MSR(APIC_IRR
), .always
= false },
124 { .index
= X2APIC_MSR(APIC_ESR
), .always
= false },
125 { .index
= X2APIC_MSR(APIC_ICR
), .always
= false },
126 { .index
= X2APIC_MSR(APIC_ICR2
), .always
= false },
130 * AMD does not virtualize APIC TSC-deadline timer mode, but it is
131 * emulated by KVM. When setting APIC LVTT (0x832) register bit 18,
132 * the AVIC hardware would generate GP fault. Therefore, always
133 * intercept the MSR 0x832, and do not setup direct_access_msr.
135 { .index
= X2APIC_MSR(APIC_LVTTHMR
), .always
= false },
136 { .index
= X2APIC_MSR(APIC_LVTPC
), .always
= false },
137 { .index
= X2APIC_MSR(APIC_LVT0
), .always
= false },
138 { .index
= X2APIC_MSR(APIC_LVT1
), .always
= false },
139 { .index
= X2APIC_MSR(APIC_LVTERR
), .always
= false },
140 { .index
= X2APIC_MSR(APIC_TMICT
), .always
= false },
141 { .index
= X2APIC_MSR(APIC_TMCCT
), .always
= false },
142 { .index
= X2APIC_MSR(APIC_TDCR
), .always
= false },
143 { .index
= MSR_INVALID
, .always
= false },
147 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
148 * pause_filter_count: On processors that support Pause filtering(indicated
149 * by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
150 * count value. On VMRUN this value is loaded into an internal counter.
151 * Each time a pause instruction is executed, this counter is decremented
152 * until it reaches zero at which time a #VMEXIT is generated if pause
153 * intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
154 * Intercept Filtering for more details.
155 * This also indicate if ple logic enabled.
157 * pause_filter_thresh: In addition, some processor families support advanced
158 * pause filtering (indicated by CPUID Fn8000_000A_EDX) upper bound on
159 * the amount of time a guest is allowed to execute in a pause loop.
160 * In this mode, a 16-bit pause filter threshold field is added in the
161 * VMCB. The threshold value is a cycle count that is used to reset the
162 * pause counter. As with simple pause filtering, VMRUN loads the pause
163 * count value from VMCB into an internal counter. Then, on each pause
164 * instruction the hardware checks the elapsed number of cycles since
165 * the most recent pause instruction against the pause filter threshold.
166 * If the elapsed cycle count is greater than the pause filter threshold,
167 * then the internal pause count is reloaded from the VMCB and execution
168 * continues. If the elapsed cycle count is less than the pause filter
169 * threshold, then the internal pause count is decremented. If the count
170 * value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
171 * triggered. If advanced pause filtering is supported and pause filter
172 * threshold field is set to zero, the filter will operate in the simpler,
176 static unsigned short pause_filter_thresh
= KVM_DEFAULT_PLE_GAP
;
177 module_param(pause_filter_thresh
, ushort
, 0444);
179 static unsigned short pause_filter_count
= KVM_SVM_DEFAULT_PLE_WINDOW
;
180 module_param(pause_filter_count
, ushort
, 0444);
182 /* Default doubles per-vcpu window every exit. */
183 static unsigned short pause_filter_count_grow
= KVM_DEFAULT_PLE_WINDOW_GROW
;
184 module_param(pause_filter_count_grow
, ushort
, 0444);
186 /* Default resets per-vcpu window every exit to pause_filter_count. */
187 static unsigned short pause_filter_count_shrink
= KVM_DEFAULT_PLE_WINDOW_SHRINK
;
188 module_param(pause_filter_count_shrink
, ushort
, 0444);
190 /* Default is to compute the maximum so we can never overflow. */
191 static unsigned short pause_filter_count_max
= KVM_SVM_DEFAULT_PLE_WINDOW_MAX
;
192 module_param(pause_filter_count_max
, ushort
, 0444);
195 * Use nested page tables by default. Note, NPT may get forced off by
196 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
198 bool npt_enabled
= true;
199 module_param_named(npt
, npt_enabled
, bool, 0444);
201 /* allow nested virtualization in KVM/SVM */
202 static int nested
= true;
203 module_param(nested
, int, 0444);
205 /* enable/disable Next RIP Save */
207 module_param(nrips
, int, 0444);
209 /* enable/disable Virtual VMLOAD VMSAVE */
210 static int vls
= true;
211 module_param(vls
, int, 0444);
213 /* enable/disable Virtual GIF */
215 module_param(vgif
, int, 0444);
217 /* enable/disable LBR virtualization */
218 static int lbrv
= true;
219 module_param(lbrv
, int, 0444);
221 static int tsc_scaling
= true;
222 module_param(tsc_scaling
, int, 0444);
225 * enable / disable AVIC. Because the defaults differ for APICv
226 * support between VMX and SVM we cannot use module_param_named.
229 module_param(avic
, bool, 0444);
231 bool __read_mostly dump_invalid_vmcb
;
232 module_param(dump_invalid_vmcb
, bool, 0644);
235 bool intercept_smi
= true;
236 module_param(intercept_smi
, bool, 0444);
239 module_param(vnmi
, bool, 0444);
241 static bool svm_gp_erratum_intercept
= true;
243 static u8 rsm_ins_bytes
[] = "\x0f\xaa";
245 static unsigned long iopm_base
;
247 DEFINE_PER_CPU(struct svm_cpu_data
, svm_data
);
250 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
251 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
253 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
254 * defer the restoration of TSC_AUX until the CPU returns to userspace.
256 static int tsc_aux_uret_slot __read_mostly
= -1;
258 static const u32 msrpm_ranges
[] = {0, 0xc0000000, 0xc0010000};
260 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
261 #define MSRS_RANGE_SIZE 2048
262 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
264 u32
svm_msrpm_offset(u32 msr
)
269 for (i
= 0; i
< NUM_MSR_MAPS
; i
++) {
270 if (msr
< msrpm_ranges
[i
] ||
271 msr
>= msrpm_ranges
[i
] + MSRS_IN_RANGE
)
274 offset
= (msr
- msrpm_ranges
[i
]) / 4; /* 4 msrs per u8 */
275 offset
+= (i
* MSRS_RANGE_SIZE
); /* add range offset */
277 /* Now we have the u8 offset - but need the u32 offset */
281 /* MSR not in any range */
285 static void svm_flush_tlb_current(struct kvm_vcpu
*vcpu
);
287 static int get_npt_level(void)
290 return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL
: PT64_ROOT_4LEVEL
;
292 return PT32E_ROOT_LEVEL
;
296 int svm_set_efer(struct kvm_vcpu
*vcpu
, u64 efer
)
298 struct vcpu_svm
*svm
= to_svm(vcpu
);
299 u64 old_efer
= vcpu
->arch
.efer
;
300 vcpu
->arch
.efer
= efer
;
303 /* Shadow paging assumes NX to be available. */
306 if (!(efer
& EFER_LMA
))
310 if ((old_efer
& EFER_SVME
) != (efer
& EFER_SVME
)) {
311 if (!(efer
& EFER_SVME
)) {
312 svm_leave_nested(vcpu
);
313 svm_set_gif(svm
, true);
314 /* #GP intercept is still needed for vmware backdoor */
315 if (!enable_vmware_backdoor
)
316 clr_exception_intercept(svm
, GP_VECTOR
);
319 * Free the nested guest state, unless we are in SMM.
320 * In this case we will return to the nested guest
321 * as soon as we leave SMM.
324 svm_free_nested(svm
);
327 int ret
= svm_allocate_nested(svm
);
330 vcpu
->arch
.efer
= old_efer
;
335 * Never intercept #GP for SEV guests, KVM can't
336 * decrypt guest memory to workaround the erratum.
338 if (svm_gp_erratum_intercept
&& !sev_guest(vcpu
->kvm
))
339 set_exception_intercept(svm
, GP_VECTOR
);
343 svm
->vmcb
->save
.efer
= efer
| EFER_SVME
;
344 vmcb_mark_dirty(svm
->vmcb
, VMCB_CR
);
348 static u32
svm_get_interrupt_shadow(struct kvm_vcpu
*vcpu
)
350 struct vcpu_svm
*svm
= to_svm(vcpu
);
353 if (svm
->vmcb
->control
.int_state
& SVM_INTERRUPT_SHADOW_MASK
)
354 ret
= KVM_X86_SHADOW_INT_STI
| KVM_X86_SHADOW_INT_MOV_SS
;
358 static void svm_set_interrupt_shadow(struct kvm_vcpu
*vcpu
, int mask
)
360 struct vcpu_svm
*svm
= to_svm(vcpu
);
363 svm
->vmcb
->control
.int_state
&= ~SVM_INTERRUPT_SHADOW_MASK
;
365 svm
->vmcb
->control
.int_state
|= SVM_INTERRUPT_SHADOW_MASK
;
369 static int __svm_skip_emulated_instruction(struct kvm_vcpu
*vcpu
,
370 bool commit_side_effects
)
372 struct vcpu_svm
*svm
= to_svm(vcpu
);
373 unsigned long old_rflags
;
376 * SEV-ES does not expose the next RIP. The RIP update is controlled by
377 * the type of exit and the #VC handler in the guest.
379 if (sev_es_guest(vcpu
->kvm
))
382 if (nrips
&& svm
->vmcb
->control
.next_rip
!= 0) {
383 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS
));
384 svm
->next_rip
= svm
->vmcb
->control
.next_rip
;
387 if (!svm
->next_rip
) {
388 if (unlikely(!commit_side_effects
))
389 old_rflags
= svm
->vmcb
->save
.rflags
;
391 if (!kvm_emulate_instruction(vcpu
, EMULTYPE_SKIP
))
394 if (unlikely(!commit_side_effects
))
395 svm
->vmcb
->save
.rflags
= old_rflags
;
397 kvm_rip_write(vcpu
, svm
->next_rip
);
401 if (likely(commit_side_effects
))
402 svm_set_interrupt_shadow(vcpu
, 0);
407 static int svm_skip_emulated_instruction(struct kvm_vcpu
*vcpu
)
409 return __svm_skip_emulated_instruction(vcpu
, true);
412 static int svm_update_soft_interrupt_rip(struct kvm_vcpu
*vcpu
)
414 unsigned long rip
, old_rip
= kvm_rip_read(vcpu
);
415 struct vcpu_svm
*svm
= to_svm(vcpu
);
418 * Due to architectural shortcomings, the CPU doesn't always provide
419 * NextRIP, e.g. if KVM intercepted an exception that occurred while
420 * the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip
421 * the instruction even if NextRIP is supported to acquire the next
422 * RIP so that it can be shoved into the NextRIP field, otherwise
423 * hardware will fail to advance guest RIP during event injection.
424 * Drop the exception/interrupt if emulation fails and effectively
425 * retry the instruction, it's the least awful option. If NRIPS is
426 * in use, the skip must not commit any side effects such as clearing
427 * the interrupt shadow or RFLAGS.RF.
429 if (!__svm_skip_emulated_instruction(vcpu
, !nrips
))
432 rip
= kvm_rip_read(vcpu
);
435 * Save the injection information, even when using next_rip, as the
436 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
437 * doesn't complete due to a VM-Exit occurring while the CPU is
438 * vectoring the event. Decoding the instruction isn't guaranteed to
439 * work as there may be no backing instruction, e.g. if the event is
440 * being injected by L1 for L2, or if the guest is patching INT3 into
441 * a different instruction.
443 svm
->soft_int_injected
= true;
444 svm
->soft_int_csbase
= svm
->vmcb
->save
.cs
.base
;
445 svm
->soft_int_old_rip
= old_rip
;
446 svm
->soft_int_next_rip
= rip
;
449 kvm_rip_write(vcpu
, old_rip
);
451 if (static_cpu_has(X86_FEATURE_NRIPS
))
452 svm
->vmcb
->control
.next_rip
= rip
;
457 static void svm_inject_exception(struct kvm_vcpu
*vcpu
)
459 struct kvm_queued_exception
*ex
= &vcpu
->arch
.exception
;
460 struct vcpu_svm
*svm
= to_svm(vcpu
);
462 kvm_deliver_exception_payload(vcpu
, ex
);
464 if (kvm_exception_is_soft(ex
->vector
) &&
465 svm_update_soft_interrupt_rip(vcpu
))
468 svm
->vmcb
->control
.event_inj
= ex
->vector
470 | (ex
->has_error_code
? SVM_EVTINJ_VALID_ERR
: 0)
471 | SVM_EVTINJ_TYPE_EXEPT
;
472 svm
->vmcb
->control
.event_inj_err
= ex
->error_code
;
475 static void svm_init_erratum_383(void)
481 if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH
))
484 /* Use _safe variants to not break nested virtualization */
485 val
= native_read_msr_safe(MSR_AMD64_DC_CFG
, &err
);
491 low
= lower_32_bits(val
);
492 high
= upper_32_bits(val
);
494 native_write_msr_safe(MSR_AMD64_DC_CFG
, low
, high
);
496 erratum_383_found
= true;
499 static void svm_init_osvw(struct kvm_vcpu
*vcpu
)
502 * Guests should see errata 400 and 415 as fixed (assuming that
503 * HLT and IO instructions are intercepted).
505 vcpu
->arch
.osvw
.length
= (osvw_len
>= 3) ? (osvw_len
) : 3;
506 vcpu
->arch
.osvw
.status
= osvw_status
& ~(6ULL);
509 * By increasing VCPU's osvw.length to 3 we are telling the guest that
510 * all osvw.status bits inside that length, including bit 0 (which is
511 * reserved for erratum 298), are valid. However, if host processor's
512 * osvw_len is 0 then osvw_status[0] carries no information. We need to
513 * be conservative here and therefore we tell the guest that erratum 298
514 * is present (because we really don't know).
516 if (osvw_len
== 0 && boot_cpu_data
.x86
== 0x10)
517 vcpu
->arch
.osvw
.status
|= 1;
520 static bool __kvm_is_svm_supported(void)
522 int cpu
= smp_processor_id();
523 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
525 if (c
->x86_vendor
!= X86_VENDOR_AMD
&&
526 c
->x86_vendor
!= X86_VENDOR_HYGON
) {
527 pr_err("CPU %d isn't AMD or Hygon\n", cpu
);
531 if (!cpu_has(c
, X86_FEATURE_SVM
)) {
532 pr_err("SVM not supported by CPU %d\n", cpu
);
536 if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT
)) {
537 pr_info("KVM is unsupported when running as an SEV guest\n");
544 static bool kvm_is_svm_supported(void)
549 supported
= __kvm_is_svm_supported();
555 static int svm_check_processor_compat(void)
557 if (!__kvm_is_svm_supported())
563 static void __svm_write_tsc_multiplier(u64 multiplier
)
565 if (multiplier
== __this_cpu_read(current_tsc_ratio
))
568 wrmsrl(MSR_AMD64_TSC_RATIO
, multiplier
);
569 __this_cpu_write(current_tsc_ratio
, multiplier
);
572 static inline void kvm_cpu_svm_disable(void)
576 wrmsrl(MSR_VM_HSAVE_PA
, 0);
577 rdmsrl(MSR_EFER
, efer
);
578 if (efer
& EFER_SVME
) {
580 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
581 * NMI aren't blocked.
584 wrmsrl(MSR_EFER
, efer
& ~EFER_SVME
);
588 static void svm_emergency_disable(void)
590 kvm_rebooting
= true;
592 kvm_cpu_svm_disable();
595 static void svm_hardware_disable(void)
597 /* Make sure we clean up behind us */
599 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT
);
601 kvm_cpu_svm_disable();
603 amd_pmu_disable_virt();
606 static int svm_hardware_enable(void)
609 struct svm_cpu_data
*sd
;
611 int me
= raw_smp_processor_id();
613 rdmsrl(MSR_EFER
, efer
);
614 if (efer
& EFER_SVME
)
617 sd
= per_cpu_ptr(&svm_data
, me
);
618 sd
->asid_generation
= 1;
619 sd
->max_asid
= cpuid_ebx(SVM_CPUID_FUNC
) - 1;
620 sd
->next_asid
= sd
->max_asid
+ 1;
621 sd
->min_asid
= max_sev_asid
+ 1;
623 wrmsrl(MSR_EFER
, efer
| EFER_SVME
);
625 wrmsrl(MSR_VM_HSAVE_PA
, sd
->save_area_pa
);
627 if (static_cpu_has(X86_FEATURE_TSCRATEMSR
)) {
629 * Set the default value, even if we don't use TSC scaling
630 * to avoid having stale value in the msr
632 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT
);
639 * Note that it is possible to have a system with mixed processor
640 * revisions and therefore different OSVW bits. If bits are not the same
641 * on different processors then choose the worst case (i.e. if erratum
642 * is present on one processor and not on another then assume that the
643 * erratum is present everywhere).
645 if (cpu_has(&boot_cpu_data
, X86_FEATURE_OSVW
)) {
646 uint64_t len
, status
= 0;
649 len
= native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH
, &err
);
651 status
= native_read_msr_safe(MSR_AMD64_OSVW_STATUS
,
655 osvw_status
= osvw_len
= 0;
659 osvw_status
|= status
;
660 osvw_status
&= (1ULL << osvw_len
) - 1;
663 osvw_status
= osvw_len
= 0;
665 svm_init_erratum_383();
667 amd_pmu_enable_virt();
670 * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
671 * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
672 * Since Linux does not change the value of TSC_AUX once set, prime the
673 * TSC_AUX field now to avoid a RDMSR on every vCPU run.
675 if (boot_cpu_has(X86_FEATURE_V_TSC_AUX
)) {
676 struct sev_es_save_area
*hostsa
;
677 u32 __maybe_unused msr_hi
;
679 hostsa
= (struct sev_es_save_area
*)(page_address(sd
->save_area
) + 0x400);
681 rdmsr(MSR_TSC_AUX
, hostsa
->tsc_aux
, msr_hi
);
687 static void svm_cpu_uninit(int cpu
)
689 struct svm_cpu_data
*sd
= per_cpu_ptr(&svm_data
, cpu
);
694 kfree(sd
->sev_vmcbs
);
695 __free_page(sd
->save_area
);
696 sd
->save_area_pa
= 0;
697 sd
->save_area
= NULL
;
700 static int svm_cpu_init(int cpu
)
702 struct svm_cpu_data
*sd
= per_cpu_ptr(&svm_data
, cpu
);
705 memset(sd
, 0, sizeof(struct svm_cpu_data
));
706 sd
->save_area
= alloc_page(GFP_KERNEL
| __GFP_ZERO
);
710 ret
= sev_cpu_init(sd
);
714 sd
->save_area_pa
= __sme_page_pa(sd
->save_area
);
718 __free_page(sd
->save_area
);
719 sd
->save_area
= NULL
;
724 static void set_dr_intercepts(struct vcpu_svm
*svm
)
726 struct vmcb
*vmcb
= svm
->vmcb01
.ptr
;
728 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR0_READ
);
729 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR1_READ
);
730 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR2_READ
);
731 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR3_READ
);
732 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR4_READ
);
733 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR5_READ
);
734 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR6_READ
);
735 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR0_WRITE
);
736 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR1_WRITE
);
737 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR2_WRITE
);
738 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR3_WRITE
);
739 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR4_WRITE
);
740 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR5_WRITE
);
741 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR6_WRITE
);
742 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR7_READ
);
743 vmcb_set_intercept(&vmcb
->control
, INTERCEPT_DR7_WRITE
);
745 recalc_intercepts(svm
);
748 static void clr_dr_intercepts(struct vcpu_svm
*svm
)
750 struct vmcb
*vmcb
= svm
->vmcb01
.ptr
;
752 vmcb
->control
.intercepts
[INTERCEPT_DR
] = 0;
754 recalc_intercepts(svm
);
757 static int direct_access_msr_slot(u32 msr
)
761 for (i
= 0; direct_access_msrs
[i
].index
!= MSR_INVALID
; i
++)
762 if (direct_access_msrs
[i
].index
== msr
)
768 static void set_shadow_msr_intercept(struct kvm_vcpu
*vcpu
, u32 msr
, int read
,
771 struct vcpu_svm
*svm
= to_svm(vcpu
);
772 int slot
= direct_access_msr_slot(msr
);
777 /* Set the shadow bitmaps to the desired intercept states */
779 set_bit(slot
, svm
->shadow_msr_intercept
.read
);
781 clear_bit(slot
, svm
->shadow_msr_intercept
.read
);
784 set_bit(slot
, svm
->shadow_msr_intercept
.write
);
786 clear_bit(slot
, svm
->shadow_msr_intercept
.write
);
789 static bool valid_msr_intercept(u32 index
)
791 return direct_access_msr_slot(index
) != -ENOENT
;
794 static bool msr_write_intercepted(struct kvm_vcpu
*vcpu
, u32 msr
)
802 * For non-nested case:
803 * If the L01 MSR bitmap does not intercept the MSR, then we need to
807 * If the L02 MSR bitmap does not intercept the MSR, then we need to
810 msrpm
= is_guest_mode(vcpu
) ? to_svm(vcpu
)->nested
.msrpm
:
813 offset
= svm_msrpm_offset(msr
);
814 bit_write
= 2 * (msr
& 0x0f) + 1;
817 BUG_ON(offset
== MSR_INVALID
);
819 return test_bit(bit_write
, &tmp
);
822 static void set_msr_interception_bitmap(struct kvm_vcpu
*vcpu
, u32
*msrpm
,
823 u32 msr
, int read
, int write
)
825 struct vcpu_svm
*svm
= to_svm(vcpu
);
826 u8 bit_read
, bit_write
;
831 * If this warning triggers extend the direct_access_msrs list at the
832 * beginning of the file
834 WARN_ON(!valid_msr_intercept(msr
));
836 /* Enforce non allowed MSRs to trap */
837 if (read
&& !kvm_msr_allowed(vcpu
, msr
, KVM_MSR_FILTER_READ
))
840 if (write
&& !kvm_msr_allowed(vcpu
, msr
, KVM_MSR_FILTER_WRITE
))
843 offset
= svm_msrpm_offset(msr
);
844 bit_read
= 2 * (msr
& 0x0f);
845 bit_write
= 2 * (msr
& 0x0f) + 1;
848 BUG_ON(offset
== MSR_INVALID
);
850 read
? clear_bit(bit_read
, &tmp
) : set_bit(bit_read
, &tmp
);
851 write
? clear_bit(bit_write
, &tmp
) : set_bit(bit_write
, &tmp
);
855 svm_hv_vmcb_dirty_nested_enlightenments(vcpu
);
856 svm
->nested
.force_msr_bitmap_recalc
= true;
859 void set_msr_interception(struct kvm_vcpu
*vcpu
, u32
*msrpm
, u32 msr
,
862 set_shadow_msr_intercept(vcpu
, msr
, read
, write
);
863 set_msr_interception_bitmap(vcpu
, msrpm
, msr
, read
, write
);
866 u32
*svm_vcpu_alloc_msrpm(void)
868 unsigned int order
= get_order(MSRPM_SIZE
);
869 struct page
*pages
= alloc_pages(GFP_KERNEL_ACCOUNT
, order
);
875 msrpm
= page_address(pages
);
876 memset(msrpm
, 0xff, PAGE_SIZE
* (1 << order
));
881 void svm_vcpu_init_msrpm(struct kvm_vcpu
*vcpu
, u32
*msrpm
)
885 for (i
= 0; direct_access_msrs
[i
].index
!= MSR_INVALID
; i
++) {
886 if (!direct_access_msrs
[i
].always
)
888 set_msr_interception(vcpu
, msrpm
, direct_access_msrs
[i
].index
, 1, 1);
892 void svm_set_x2apic_msr_interception(struct vcpu_svm
*svm
, bool intercept
)
896 if (intercept
== svm
->x2avic_msrs_intercepted
)
902 for (i
= 0; i
< MAX_DIRECT_ACCESS_MSRS
; i
++) {
903 int index
= direct_access_msrs
[i
].index
;
905 if ((index
< APIC_BASE_MSR
) ||
906 (index
> APIC_BASE_MSR
+ 0xff))
908 set_msr_interception(&svm
->vcpu
, svm
->msrpm
, index
,
909 !intercept
, !intercept
);
912 svm
->x2avic_msrs_intercepted
= intercept
;
915 void svm_vcpu_free_msrpm(u32
*msrpm
)
917 __free_pages(virt_to_page(msrpm
), get_order(MSRPM_SIZE
));
920 static void svm_msr_filter_changed(struct kvm_vcpu
*vcpu
)
922 struct vcpu_svm
*svm
= to_svm(vcpu
);
926 * Set intercept permissions for all direct access MSRs again. They
927 * will automatically get filtered through the MSR filter, so we are
928 * back in sync after this.
930 for (i
= 0; direct_access_msrs
[i
].index
!= MSR_INVALID
; i
++) {
931 u32 msr
= direct_access_msrs
[i
].index
;
932 u32 read
= test_bit(i
, svm
->shadow_msr_intercept
.read
);
933 u32 write
= test_bit(i
, svm
->shadow_msr_intercept
.write
);
935 set_msr_interception_bitmap(vcpu
, svm
->msrpm
, msr
, read
, write
);
939 static void add_msr_offset(u32 offset
)
943 for (i
= 0; i
< MSRPM_OFFSETS
; ++i
) {
945 /* Offset already in list? */
946 if (msrpm_offsets
[i
] == offset
)
949 /* Slot used by another offset? */
950 if (msrpm_offsets
[i
] != MSR_INVALID
)
953 /* Add offset to list */
954 msrpm_offsets
[i
] = offset
;
960 * If this BUG triggers the msrpm_offsets table has an overflow. Just
961 * increase MSRPM_OFFSETS in this case.
966 static void init_msrpm_offsets(void)
970 memset(msrpm_offsets
, 0xff, sizeof(msrpm_offsets
));
972 for (i
= 0; direct_access_msrs
[i
].index
!= MSR_INVALID
; i
++) {
975 offset
= svm_msrpm_offset(direct_access_msrs
[i
].index
);
976 BUG_ON(offset
== MSR_INVALID
);
978 add_msr_offset(offset
);
982 void svm_copy_lbrs(struct vmcb
*to_vmcb
, struct vmcb
*from_vmcb
)
984 to_vmcb
->save
.dbgctl
= from_vmcb
->save
.dbgctl
;
985 to_vmcb
->save
.br_from
= from_vmcb
->save
.br_from
;
986 to_vmcb
->save
.br_to
= from_vmcb
->save
.br_to
;
987 to_vmcb
->save
.last_excp_from
= from_vmcb
->save
.last_excp_from
;
988 to_vmcb
->save
.last_excp_to
= from_vmcb
->save
.last_excp_to
;
990 vmcb_mark_dirty(to_vmcb
, VMCB_LBR
);
993 static void svm_enable_lbrv(struct kvm_vcpu
*vcpu
)
995 struct vcpu_svm
*svm
= to_svm(vcpu
);
997 svm
->vmcb
->control
.virt_ext
|= LBR_CTL_ENABLE_MASK
;
998 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTBRANCHFROMIP
, 1, 1);
999 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTBRANCHTOIP
, 1, 1);
1000 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTINTFROMIP
, 1, 1);
1001 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTINTTOIP
, 1, 1);
1003 /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
1004 if (is_guest_mode(vcpu
))
1005 svm_copy_lbrs(svm
->vmcb
, svm
->vmcb01
.ptr
);
1008 static void svm_disable_lbrv(struct kvm_vcpu
*vcpu
)
1010 struct vcpu_svm
*svm
= to_svm(vcpu
);
1012 svm
->vmcb
->control
.virt_ext
&= ~LBR_CTL_ENABLE_MASK
;
1013 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTBRANCHFROMIP
, 0, 0);
1014 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTBRANCHTOIP
, 0, 0);
1015 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTINTFROMIP
, 0, 0);
1016 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_LASTINTTOIP
, 0, 0);
1019 * Move the LBR msrs back to the vmcb01 to avoid copying them
1020 * on nested guest entries.
1022 if (is_guest_mode(vcpu
))
1023 svm_copy_lbrs(svm
->vmcb01
.ptr
, svm
->vmcb
);
1026 static struct vmcb
*svm_get_lbr_vmcb(struct vcpu_svm
*svm
)
1029 * If LBR virtualization is disabled, the LBR MSRs are always kept in
1030 * vmcb01. If LBR virtualization is enabled and L1 is running VMs of
1031 * its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
1033 return svm
->vmcb
->control
.virt_ext
& LBR_CTL_ENABLE_MASK
? svm
->vmcb
:
1037 void svm_update_lbrv(struct kvm_vcpu
*vcpu
)
1039 struct vcpu_svm
*svm
= to_svm(vcpu
);
1040 bool current_enable_lbrv
= svm
->vmcb
->control
.virt_ext
& LBR_CTL_ENABLE_MASK
;
1041 bool enable_lbrv
= (svm_get_lbr_vmcb(svm
)->save
.dbgctl
& DEBUGCTLMSR_LBR
) ||
1042 (is_guest_mode(vcpu
) && guest_can_use(vcpu
, X86_FEATURE_LBRV
) &&
1043 (svm
->nested
.ctl
.virt_ext
& LBR_CTL_ENABLE_MASK
));
1045 if (enable_lbrv
== current_enable_lbrv
)
1049 svm_enable_lbrv(vcpu
);
1051 svm_disable_lbrv(vcpu
);
1054 void disable_nmi_singlestep(struct vcpu_svm
*svm
)
1056 svm
->nmi_singlestep
= false;
1058 if (!(svm
->vcpu
.guest_debug
& KVM_GUESTDBG_SINGLESTEP
)) {
1059 /* Clear our flags if they were not set by the guest */
1060 if (!(svm
->nmi_singlestep_guest_rflags
& X86_EFLAGS_TF
))
1061 svm
->vmcb
->save
.rflags
&= ~X86_EFLAGS_TF
;
1062 if (!(svm
->nmi_singlestep_guest_rflags
& X86_EFLAGS_RF
))
1063 svm
->vmcb
->save
.rflags
&= ~X86_EFLAGS_RF
;
1067 static void grow_ple_window(struct kvm_vcpu
*vcpu
)
1069 struct vcpu_svm
*svm
= to_svm(vcpu
);
1070 struct vmcb_control_area
*control
= &svm
->vmcb
->control
;
1071 int old
= control
->pause_filter_count
;
1073 if (kvm_pause_in_guest(vcpu
->kvm
))
1076 control
->pause_filter_count
= __grow_ple_window(old
,
1078 pause_filter_count_grow
,
1079 pause_filter_count_max
);
1081 if (control
->pause_filter_count
!= old
) {
1082 vmcb_mark_dirty(svm
->vmcb
, VMCB_INTERCEPTS
);
1083 trace_kvm_ple_window_update(vcpu
->vcpu_id
,
1084 control
->pause_filter_count
, old
);
1088 static void shrink_ple_window(struct kvm_vcpu
*vcpu
)
1090 struct vcpu_svm
*svm
= to_svm(vcpu
);
1091 struct vmcb_control_area
*control
= &svm
->vmcb
->control
;
1092 int old
= control
->pause_filter_count
;
1094 if (kvm_pause_in_guest(vcpu
->kvm
))
1097 control
->pause_filter_count
=
1098 __shrink_ple_window(old
,
1100 pause_filter_count_shrink
,
1101 pause_filter_count
);
1102 if (control
->pause_filter_count
!= old
) {
1103 vmcb_mark_dirty(svm
->vmcb
, VMCB_INTERCEPTS
);
1104 trace_kvm_ple_window_update(vcpu
->vcpu_id
,
1105 control
->pause_filter_count
, old
);
1109 static void svm_hardware_unsetup(void)
1113 sev_hardware_unsetup();
1115 for_each_possible_cpu(cpu
)
1116 svm_cpu_uninit(cpu
);
1118 __free_pages(pfn_to_page(iopm_base
>> PAGE_SHIFT
),
1119 get_order(IOPM_SIZE
));
1123 static void init_seg(struct vmcb_seg
*seg
)
1126 seg
->attrib
= SVM_SELECTOR_P_MASK
| SVM_SELECTOR_S_MASK
|
1127 SVM_SELECTOR_WRITE_MASK
; /* Read/Write Data Segment */
1128 seg
->limit
= 0xffff;
1132 static void init_sys_seg(struct vmcb_seg
*seg
, uint32_t type
)
1135 seg
->attrib
= SVM_SELECTOR_P_MASK
| type
;
1136 seg
->limit
= 0xffff;
1140 static u64
svm_get_l2_tsc_offset(struct kvm_vcpu
*vcpu
)
1142 struct vcpu_svm
*svm
= to_svm(vcpu
);
1144 return svm
->nested
.ctl
.tsc_offset
;
1147 static u64
svm_get_l2_tsc_multiplier(struct kvm_vcpu
*vcpu
)
1149 struct vcpu_svm
*svm
= to_svm(vcpu
);
1151 return svm
->tsc_ratio_msr
;
1154 static void svm_write_tsc_offset(struct kvm_vcpu
*vcpu
)
1156 struct vcpu_svm
*svm
= to_svm(vcpu
);
1158 svm
->vmcb01
.ptr
->control
.tsc_offset
= vcpu
->arch
.l1_tsc_offset
;
1159 svm
->vmcb
->control
.tsc_offset
= vcpu
->arch
.tsc_offset
;
1160 vmcb_mark_dirty(svm
->vmcb
, VMCB_INTERCEPTS
);
1163 void svm_write_tsc_multiplier(struct kvm_vcpu
*vcpu
)
1166 if (to_svm(vcpu
)->guest_state_loaded
)
1167 __svm_write_tsc_multiplier(vcpu
->arch
.tsc_scaling_ratio
);
1171 /* Evaluate instruction intercepts that depend on guest CPUID features. */
1172 static void svm_recalc_instruction_intercepts(struct kvm_vcpu
*vcpu
,
1173 struct vcpu_svm
*svm
)
1176 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
1177 * roots, or if INVPCID is disabled in the guest to inject #UD.
1179 if (kvm_cpu_cap_has(X86_FEATURE_INVPCID
)) {
1181 !guest_cpuid_has(&svm
->vcpu
, X86_FEATURE_INVPCID
))
1182 svm_set_intercept(svm
, INTERCEPT_INVPCID
);
1184 svm_clr_intercept(svm
, INTERCEPT_INVPCID
);
1187 if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP
)) {
1188 if (guest_cpuid_has(vcpu
, X86_FEATURE_RDTSCP
))
1189 svm_clr_intercept(svm
, INTERCEPT_RDTSCP
);
1191 svm_set_intercept(svm
, INTERCEPT_RDTSCP
);
1195 static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu
*vcpu
)
1197 struct vcpu_svm
*svm
= to_svm(vcpu
);
1199 if (guest_cpuid_is_intel(vcpu
)) {
1201 * We must intercept SYSENTER_EIP and SYSENTER_ESP
1202 * accesses because the processor only stores 32 bits.
1203 * For the same reason we cannot use virtual VMLOAD/VMSAVE.
1205 svm_set_intercept(svm
, INTERCEPT_VMLOAD
);
1206 svm_set_intercept(svm
, INTERCEPT_VMSAVE
);
1207 svm
->vmcb
->control
.virt_ext
&= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK
;
1209 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_SYSENTER_EIP
, 0, 0);
1210 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_SYSENTER_ESP
, 0, 0);
1213 * If hardware supports Virtual VMLOAD VMSAVE then enable it
1214 * in VMCB and clear intercepts to avoid #VMEXIT.
1217 svm_clr_intercept(svm
, INTERCEPT_VMLOAD
);
1218 svm_clr_intercept(svm
, INTERCEPT_VMSAVE
);
1219 svm
->vmcb
->control
.virt_ext
|= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK
;
1221 /* No need to intercept these MSRs */
1222 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_SYSENTER_EIP
, 1, 1);
1223 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_SYSENTER_ESP
, 1, 1);
1227 static void init_vmcb(struct kvm_vcpu
*vcpu
)
1229 struct vcpu_svm
*svm
= to_svm(vcpu
);
1230 struct vmcb
*vmcb
= svm
->vmcb01
.ptr
;
1231 struct vmcb_control_area
*control
= &vmcb
->control
;
1232 struct vmcb_save_area
*save
= &vmcb
->save
;
1234 svm_set_intercept(svm
, INTERCEPT_CR0_READ
);
1235 svm_set_intercept(svm
, INTERCEPT_CR3_READ
);
1236 svm_set_intercept(svm
, INTERCEPT_CR4_READ
);
1237 svm_set_intercept(svm
, INTERCEPT_CR0_WRITE
);
1238 svm_set_intercept(svm
, INTERCEPT_CR3_WRITE
);
1239 svm_set_intercept(svm
, INTERCEPT_CR4_WRITE
);
1240 if (!kvm_vcpu_apicv_active(vcpu
))
1241 svm_set_intercept(svm
, INTERCEPT_CR8_WRITE
);
1243 set_dr_intercepts(svm
);
1245 set_exception_intercept(svm
, PF_VECTOR
);
1246 set_exception_intercept(svm
, UD_VECTOR
);
1247 set_exception_intercept(svm
, MC_VECTOR
);
1248 set_exception_intercept(svm
, AC_VECTOR
);
1249 set_exception_intercept(svm
, DB_VECTOR
);
1251 * Guest access to VMware backdoor ports could legitimately
1252 * trigger #GP because of TSS I/O permission bitmap.
1253 * We intercept those #GP and allow access to them anyway
1256 if (enable_vmware_backdoor
)
1257 set_exception_intercept(svm
, GP_VECTOR
);
1259 svm_set_intercept(svm
, INTERCEPT_INTR
);
1260 svm_set_intercept(svm
, INTERCEPT_NMI
);
1263 svm_set_intercept(svm
, INTERCEPT_SMI
);
1265 svm_set_intercept(svm
, INTERCEPT_SELECTIVE_CR0
);
1266 svm_set_intercept(svm
, INTERCEPT_RDPMC
);
1267 svm_set_intercept(svm
, INTERCEPT_CPUID
);
1268 svm_set_intercept(svm
, INTERCEPT_INVD
);
1269 svm_set_intercept(svm
, INTERCEPT_INVLPG
);
1270 svm_set_intercept(svm
, INTERCEPT_INVLPGA
);
1271 svm_set_intercept(svm
, INTERCEPT_IOIO_PROT
);
1272 svm_set_intercept(svm
, INTERCEPT_MSR_PROT
);
1273 svm_set_intercept(svm
, INTERCEPT_TASK_SWITCH
);
1274 svm_set_intercept(svm
, INTERCEPT_SHUTDOWN
);
1275 svm_set_intercept(svm
, INTERCEPT_VMRUN
);
1276 svm_set_intercept(svm
, INTERCEPT_VMMCALL
);
1277 svm_set_intercept(svm
, INTERCEPT_VMLOAD
);
1278 svm_set_intercept(svm
, INTERCEPT_VMSAVE
);
1279 svm_set_intercept(svm
, INTERCEPT_STGI
);
1280 svm_set_intercept(svm
, INTERCEPT_CLGI
);
1281 svm_set_intercept(svm
, INTERCEPT_SKINIT
);
1282 svm_set_intercept(svm
, INTERCEPT_WBINVD
);
1283 svm_set_intercept(svm
, INTERCEPT_XSETBV
);
1284 svm_set_intercept(svm
, INTERCEPT_RDPRU
);
1285 svm_set_intercept(svm
, INTERCEPT_RSM
);
1287 if (!kvm_mwait_in_guest(vcpu
->kvm
)) {
1288 svm_set_intercept(svm
, INTERCEPT_MONITOR
);
1289 svm_set_intercept(svm
, INTERCEPT_MWAIT
);
1292 if (!kvm_hlt_in_guest(vcpu
->kvm
))
1293 svm_set_intercept(svm
, INTERCEPT_HLT
);
1295 control
->iopm_base_pa
= __sme_set(iopm_base
);
1296 control
->msrpm_base_pa
= __sme_set(__pa(svm
->msrpm
));
1297 control
->int_ctl
= V_INTR_MASKING_MASK
;
1299 init_seg(&save
->es
);
1300 init_seg(&save
->ss
);
1301 init_seg(&save
->ds
);
1302 init_seg(&save
->fs
);
1303 init_seg(&save
->gs
);
1305 save
->cs
.selector
= 0xf000;
1306 save
->cs
.base
= 0xffff0000;
1307 /* Executable/Readable Code Segment */
1308 save
->cs
.attrib
= SVM_SELECTOR_READ_MASK
| SVM_SELECTOR_P_MASK
|
1309 SVM_SELECTOR_S_MASK
| SVM_SELECTOR_CODE_MASK
;
1310 save
->cs
.limit
= 0xffff;
1312 save
->gdtr
.base
= 0;
1313 save
->gdtr
.limit
= 0xffff;
1314 save
->idtr
.base
= 0;
1315 save
->idtr
.limit
= 0xffff;
1317 init_sys_seg(&save
->ldtr
, SEG_TYPE_LDT
);
1318 init_sys_seg(&save
->tr
, SEG_TYPE_BUSY_TSS16
);
1321 /* Setup VMCB for Nested Paging */
1322 control
->nested_ctl
|= SVM_NESTED_CTL_NP_ENABLE
;
1323 svm_clr_intercept(svm
, INTERCEPT_INVLPG
);
1324 clr_exception_intercept(svm
, PF_VECTOR
);
1325 svm_clr_intercept(svm
, INTERCEPT_CR3_READ
);
1326 svm_clr_intercept(svm
, INTERCEPT_CR3_WRITE
);
1327 save
->g_pat
= vcpu
->arch
.pat
;
1330 svm
->current_vmcb
->asid_generation
= 0;
1333 svm
->nested
.vmcb12_gpa
= INVALID_GPA
;
1334 svm
->nested
.last_vmcb12_gpa
= INVALID_GPA
;
1336 if (!kvm_pause_in_guest(vcpu
->kvm
)) {
1337 control
->pause_filter_count
= pause_filter_count
;
1338 if (pause_filter_thresh
)
1339 control
->pause_filter_thresh
= pause_filter_thresh
;
1340 svm_set_intercept(svm
, INTERCEPT_PAUSE
);
1342 svm_clr_intercept(svm
, INTERCEPT_PAUSE
);
1345 svm_recalc_instruction_intercepts(vcpu
, svm
);
1348 * If the host supports V_SPEC_CTRL then disable the interception
1349 * of MSR_IA32_SPEC_CTRL.
1351 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL
))
1352 set_msr_interception(vcpu
, svm
->msrpm
, MSR_IA32_SPEC_CTRL
, 1, 1);
1354 if (kvm_vcpu_apicv_active(vcpu
))
1355 avic_init_vmcb(svm
, vmcb
);
1358 svm
->vmcb
->control
.int_ctl
|= V_NMI_ENABLE_MASK
;
1361 svm_clr_intercept(svm
, INTERCEPT_STGI
);
1362 svm_clr_intercept(svm
, INTERCEPT_CLGI
);
1363 svm
->vmcb
->control
.int_ctl
|= V_GIF_ENABLE_MASK
;
1366 if (sev_guest(vcpu
->kvm
))
1369 svm_hv_init_vmcb(vmcb
);
1370 init_vmcb_after_set_cpuid(vcpu
);
1372 vmcb_mark_all_dirty(vmcb
);
1377 static void __svm_vcpu_reset(struct kvm_vcpu
*vcpu
)
1379 struct vcpu_svm
*svm
= to_svm(vcpu
);
1381 svm_vcpu_init_msrpm(vcpu
, svm
->msrpm
);
1383 svm_init_osvw(vcpu
);
1384 vcpu
->arch
.microcode_version
= 0x01000065;
1385 svm
->tsc_ratio_msr
= kvm_caps
.default_tsc_scaling_ratio
;
1387 svm
->nmi_masked
= false;
1388 svm
->awaiting_iret_completion
= false;
1390 if (sev_es_guest(vcpu
->kvm
))
1391 sev_es_vcpu_reset(svm
);
1394 static void svm_vcpu_reset(struct kvm_vcpu
*vcpu
, bool init_event
)
1396 struct vcpu_svm
*svm
= to_svm(vcpu
);
1399 svm
->virt_spec_ctrl
= 0;
1404 __svm_vcpu_reset(vcpu
);
1407 void svm_switch_vmcb(struct vcpu_svm
*svm
, struct kvm_vmcb_info
*target_vmcb
)
1409 svm
->current_vmcb
= target_vmcb
;
1410 svm
->vmcb
= target_vmcb
->ptr
;
1413 static int svm_vcpu_create(struct kvm_vcpu
*vcpu
)
1415 struct vcpu_svm
*svm
;
1416 struct page
*vmcb01_page
;
1417 struct page
*vmsa_page
= NULL
;
1420 BUILD_BUG_ON(offsetof(struct vcpu_svm
, vcpu
) != 0);
1424 vmcb01_page
= alloc_page(GFP_KERNEL_ACCOUNT
| __GFP_ZERO
);
1428 if (sev_es_guest(vcpu
->kvm
)) {
1430 * SEV-ES guests require a separate VMSA page used to contain
1431 * the encrypted register state of the guest.
1433 vmsa_page
= alloc_page(GFP_KERNEL_ACCOUNT
| __GFP_ZERO
);
1435 goto error_free_vmcb_page
;
1438 * SEV-ES guests maintain an encrypted version of their FPU
1439 * state which is restored and saved on VMRUN and VMEXIT.
1440 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
1441 * do xsave/xrstor on it.
1443 fpstate_set_confidential(&vcpu
->arch
.guest_fpu
);
1446 err
= avic_init_vcpu(svm
);
1448 goto error_free_vmsa_page
;
1450 svm
->msrpm
= svm_vcpu_alloc_msrpm();
1453 goto error_free_vmsa_page
;
1456 svm
->x2avic_msrs_intercepted
= true;
1458 svm
->vmcb01
.ptr
= page_address(vmcb01_page
);
1459 svm
->vmcb01
.pa
= __sme_set(page_to_pfn(vmcb01_page
) << PAGE_SHIFT
);
1460 svm_switch_vmcb(svm
, &svm
->vmcb01
);
1463 svm
->sev_es
.vmsa
= page_address(vmsa_page
);
1465 svm
->guest_state_loaded
= false;
1469 error_free_vmsa_page
:
1471 __free_page(vmsa_page
);
1472 error_free_vmcb_page
:
1473 __free_page(vmcb01_page
);
1478 static void svm_clear_current_vmcb(struct vmcb
*vmcb
)
1482 for_each_online_cpu(i
)
1483 cmpxchg(per_cpu_ptr(&svm_data
.current_vmcb
, i
), vmcb
, NULL
);
1486 static void svm_vcpu_free(struct kvm_vcpu
*vcpu
)
1488 struct vcpu_svm
*svm
= to_svm(vcpu
);
1491 * The vmcb page can be recycled, causing a false negative in
1492 * svm_vcpu_load(). So, ensure that no logical CPU has this
1493 * vmcb page recorded as its current vmcb.
1495 svm_clear_current_vmcb(svm
->vmcb
);
1497 svm_leave_nested(vcpu
);
1498 svm_free_nested(svm
);
1500 sev_free_vcpu(vcpu
);
1502 __free_page(pfn_to_page(__sme_clr(svm
->vmcb01
.pa
) >> PAGE_SHIFT
));
1503 __free_pages(virt_to_page(svm
->msrpm
), get_order(MSRPM_SIZE
));
1506 static void svm_prepare_switch_to_guest(struct kvm_vcpu
*vcpu
)
1508 struct vcpu_svm
*svm
= to_svm(vcpu
);
1509 struct svm_cpu_data
*sd
= per_cpu_ptr(&svm_data
, vcpu
->cpu
);
1511 if (sev_es_guest(vcpu
->kvm
))
1512 sev_es_unmap_ghcb(svm
);
1514 if (svm
->guest_state_loaded
)
1518 * Save additional host state that will be restored on VMEXIT (sev-es)
1519 * or subsequent vmload of host save area.
1521 vmsave(sd
->save_area_pa
);
1522 if (sev_es_guest(vcpu
->kvm
)) {
1523 struct sev_es_save_area
*hostsa
;
1524 hostsa
= (struct sev_es_save_area
*)(page_address(sd
->save_area
) + 0x400);
1526 sev_es_prepare_switch_to_guest(hostsa
);
1530 __svm_write_tsc_multiplier(vcpu
->arch
.tsc_scaling_ratio
);
1533 * TSC_AUX is always virtualized for SEV-ES guests when the feature is
1534 * available. The user return MSR support is not required in this case
1535 * because TSC_AUX is restored on #VMEXIT from the host save area
1536 * (which has been initialized in svm_hardware_enable()).
1538 if (likely(tsc_aux_uret_slot
>= 0) &&
1539 (!boot_cpu_has(X86_FEATURE_V_TSC_AUX
) || !sev_es_guest(vcpu
->kvm
)))
1540 kvm_set_user_return_msr(tsc_aux_uret_slot
, svm
->tsc_aux
, -1ull);
1542 svm
->guest_state_loaded
= true;
1545 static void svm_prepare_host_switch(struct kvm_vcpu
*vcpu
)
1547 to_svm(vcpu
)->guest_state_loaded
= false;
1550 static void svm_vcpu_load(struct kvm_vcpu
*vcpu
, int cpu
)
1552 struct vcpu_svm
*svm
= to_svm(vcpu
);
1553 struct svm_cpu_data
*sd
= per_cpu_ptr(&svm_data
, cpu
);
1555 if (sd
->current_vmcb
!= svm
->vmcb
) {
1556 sd
->current_vmcb
= svm
->vmcb
;
1558 if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT
))
1559 indirect_branch_prediction_barrier();
1561 if (kvm_vcpu_apicv_active(vcpu
))
1562 avic_vcpu_load(vcpu
, cpu
);
1565 static void svm_vcpu_put(struct kvm_vcpu
*vcpu
)
1567 if (kvm_vcpu_apicv_active(vcpu
))
1568 avic_vcpu_put(vcpu
);
1570 svm_prepare_host_switch(vcpu
);
1572 ++vcpu
->stat
.host_state_reload
;
1575 static unsigned long svm_get_rflags(struct kvm_vcpu
*vcpu
)
1577 struct vcpu_svm
*svm
= to_svm(vcpu
);
1578 unsigned long rflags
= svm
->vmcb
->save
.rflags
;
1580 if (svm
->nmi_singlestep
) {
1581 /* Hide our flags if they were not set by the guest */
1582 if (!(svm
->nmi_singlestep_guest_rflags
& X86_EFLAGS_TF
))
1583 rflags
&= ~X86_EFLAGS_TF
;
1584 if (!(svm
->nmi_singlestep_guest_rflags
& X86_EFLAGS_RF
))
1585 rflags
&= ~X86_EFLAGS_RF
;
1590 static void svm_set_rflags(struct kvm_vcpu
*vcpu
, unsigned long rflags
)
1592 if (to_svm(vcpu
)->nmi_singlestep
)
1593 rflags
|= (X86_EFLAGS_TF
| X86_EFLAGS_RF
);
1596 * Any change of EFLAGS.VM is accompanied by a reload of SS
1597 * (caused by either a task switch or an inter-privilege IRET),
1598 * so we do not need to update the CPL here.
1600 to_svm(vcpu
)->vmcb
->save
.rflags
= rflags
;
1603 static bool svm_get_if_flag(struct kvm_vcpu
*vcpu
)
1605 struct vmcb
*vmcb
= to_svm(vcpu
)->vmcb
;
1607 return sev_es_guest(vcpu
->kvm
)
1608 ? vmcb
->control
.int_state
& SVM_GUEST_INTERRUPT_MASK
1609 : kvm_get_rflags(vcpu
) & X86_EFLAGS_IF
;
1612 static void svm_cache_reg(struct kvm_vcpu
*vcpu
, enum kvm_reg reg
)
1614 kvm_register_mark_available(vcpu
, reg
);
1617 case VCPU_EXREG_PDPTR
:
1619 * When !npt_enabled, mmu->pdptrs[] is already available since
1620 * it is always updated per SDM when moving to CRs.
1623 load_pdptrs(vcpu
, kvm_read_cr3(vcpu
));
1626 KVM_BUG_ON(1, vcpu
->kvm
);
1630 static void svm_set_vintr(struct vcpu_svm
*svm
)
1632 struct vmcb_control_area
*control
;
1635 * The following fields are ignored when AVIC is enabled
1637 WARN_ON(kvm_vcpu_apicv_activated(&svm
->vcpu
));
1639 svm_set_intercept(svm
, INTERCEPT_VINTR
);
1642 * Recalculating intercepts may have cleared the VINTR intercept. If
1643 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
1644 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
1645 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
1646 * interrupts will never be unblocked while L2 is running.
1648 if (!svm_is_intercept(svm
, INTERCEPT_VINTR
))
1652 * This is just a dummy VINTR to actually cause a vmexit to happen.
1653 * Actual injection of virtual interrupts happens through EVENTINJ.
1655 control
= &svm
->vmcb
->control
;
1656 control
->int_vector
= 0x0;
1657 control
->int_ctl
&= ~V_INTR_PRIO_MASK
;
1658 control
->int_ctl
|= V_IRQ_MASK
|
1659 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT
);
1660 vmcb_mark_dirty(svm
->vmcb
, VMCB_INTR
);
1663 static void svm_clear_vintr(struct vcpu_svm
*svm
)
1665 svm_clr_intercept(svm
, INTERCEPT_VINTR
);
1667 /* Drop int_ctl fields related to VINTR injection. */
1668 svm
->vmcb
->control
.int_ctl
&= ~V_IRQ_INJECTION_BITS_MASK
;
1669 if (is_guest_mode(&svm
->vcpu
)) {
1670 svm
->vmcb01
.ptr
->control
.int_ctl
&= ~V_IRQ_INJECTION_BITS_MASK
;
1672 WARN_ON((svm
->vmcb
->control
.int_ctl
& V_TPR_MASK
) !=
1673 (svm
->nested
.ctl
.int_ctl
& V_TPR_MASK
));
1675 svm
->vmcb
->control
.int_ctl
|= svm
->nested
.ctl
.int_ctl
&
1676 V_IRQ_INJECTION_BITS_MASK
;
1678 svm
->vmcb
->control
.int_vector
= svm
->nested
.ctl
.int_vector
;
1681 vmcb_mark_dirty(svm
->vmcb
, VMCB_INTR
);
1684 static struct vmcb_seg
*svm_seg(struct kvm_vcpu
*vcpu
, int seg
)
1686 struct vmcb_save_area
*save
= &to_svm(vcpu
)->vmcb
->save
;
1687 struct vmcb_save_area
*save01
= &to_svm(vcpu
)->vmcb01
.ptr
->save
;
1690 case VCPU_SREG_CS
: return &save
->cs
;
1691 case VCPU_SREG_DS
: return &save
->ds
;
1692 case VCPU_SREG_ES
: return &save
->es
;
1693 case VCPU_SREG_FS
: return &save01
->fs
;
1694 case VCPU_SREG_GS
: return &save01
->gs
;
1695 case VCPU_SREG_SS
: return &save
->ss
;
1696 case VCPU_SREG_TR
: return &save01
->tr
;
1697 case VCPU_SREG_LDTR
: return &save01
->ldtr
;
1703 static u64
svm_get_segment_base(struct kvm_vcpu
*vcpu
, int seg
)
1705 struct vmcb_seg
*s
= svm_seg(vcpu
, seg
);
1710 static void svm_get_segment(struct kvm_vcpu
*vcpu
,
1711 struct kvm_segment
*var
, int seg
)
1713 struct vmcb_seg
*s
= svm_seg(vcpu
, seg
);
1715 var
->base
= s
->base
;
1716 var
->limit
= s
->limit
;
1717 var
->selector
= s
->selector
;
1718 var
->type
= s
->attrib
& SVM_SELECTOR_TYPE_MASK
;
1719 var
->s
= (s
->attrib
>> SVM_SELECTOR_S_SHIFT
) & 1;
1720 var
->dpl
= (s
->attrib
>> SVM_SELECTOR_DPL_SHIFT
) & 3;
1721 var
->present
= (s
->attrib
>> SVM_SELECTOR_P_SHIFT
) & 1;
1722 var
->avl
= (s
->attrib
>> SVM_SELECTOR_AVL_SHIFT
) & 1;
1723 var
->l
= (s
->attrib
>> SVM_SELECTOR_L_SHIFT
) & 1;
1724 var
->db
= (s
->attrib
>> SVM_SELECTOR_DB_SHIFT
) & 1;
1727 * AMD CPUs circa 2014 track the G bit for all segments except CS.
1728 * However, the SVM spec states that the G bit is not observed by the
1729 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1730 * So let's synthesize a legal G bit for all segments, this helps
1731 * running KVM nested. It also helps cross-vendor migration, because
1732 * Intel's vmentry has a check on the 'G' bit.
1734 var
->g
= s
->limit
> 0xfffff;
1737 * AMD's VMCB does not have an explicit unusable field, so emulate it
1738 * for cross vendor migration purposes by "not present"
1740 var
->unusable
= !var
->present
;
1745 * Work around a bug where the busy flag in the tr selector
1755 * The accessed bit must always be set in the segment
1756 * descriptor cache, although it can be cleared in the
1757 * descriptor, the cached bit always remains at 1. Since
1758 * Intel has a check on this, set it here to support
1759 * cross-vendor migration.
1766 * On AMD CPUs sometimes the DB bit in the segment
1767 * descriptor is left as 1, although the whole segment has
1768 * been made unusable. Clear it here to pass an Intel VMX
1769 * entry check when cross vendor migrating.
1773 /* This is symmetric with svm_set_segment() */
1774 var
->dpl
= to_svm(vcpu
)->vmcb
->save
.cpl
;
1779 static int svm_get_cpl(struct kvm_vcpu
*vcpu
)
1781 struct vmcb_save_area
*save
= &to_svm(vcpu
)->vmcb
->save
;
1786 static void svm_get_cs_db_l_bits(struct kvm_vcpu
*vcpu
, int *db
, int *l
)
1788 struct kvm_segment cs
;
1790 svm_get_segment(vcpu
, &cs
, VCPU_SREG_CS
);
1795 static void svm_get_idt(struct kvm_vcpu
*vcpu
, struct desc_ptr
*dt
)
1797 struct vcpu_svm
*svm
= to_svm(vcpu
);
1799 dt
->size
= svm
->vmcb
->save
.idtr
.limit
;
1800 dt
->address
= svm
->vmcb
->save
.idtr
.base
;
1803 static void svm_set_idt(struct kvm_vcpu
*vcpu
, struct desc_ptr
*dt
)
1805 struct vcpu_svm
*svm
= to_svm(vcpu
);
1807 svm
->vmcb
->save
.idtr
.limit
= dt
->size
;
1808 svm
->vmcb
->save
.idtr
.base
= dt
->address
;
1809 vmcb_mark_dirty(svm
->vmcb
, VMCB_DT
);
1812 static void svm_get_gdt(struct kvm_vcpu
*vcpu
, struct desc_ptr
*dt
)
1814 struct vcpu_svm
*svm
= to_svm(vcpu
);
1816 dt
->size
= svm
->vmcb
->save
.gdtr
.limit
;
1817 dt
->address
= svm
->vmcb
->save
.gdtr
.base
;
1820 static void svm_set_gdt(struct kvm_vcpu
*vcpu
, struct desc_ptr
*dt
)
1822 struct vcpu_svm
*svm
= to_svm(vcpu
);
1824 svm
->vmcb
->save
.gdtr
.limit
= dt
->size
;
1825 svm
->vmcb
->save
.gdtr
.base
= dt
->address
;
1826 vmcb_mark_dirty(svm
->vmcb
, VMCB_DT
);
1829 static void sev_post_set_cr3(struct kvm_vcpu
*vcpu
, unsigned long cr3
)
1831 struct vcpu_svm
*svm
= to_svm(vcpu
);
1834 * For guests that don't set guest_state_protected, the cr3 update is
1835 * handled via kvm_mmu_load() while entering the guest. For guests
1836 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
1837 * VMCB save area now, since the save area will become the initial
1838 * contents of the VMSA, and future VMCB save area updates won't be
1841 if (sev_es_guest(vcpu
->kvm
)) {
1842 svm
->vmcb
->save
.cr3
= cr3
;
1843 vmcb_mark_dirty(svm
->vmcb
, VMCB_CR
);
1847 static bool svm_is_valid_cr0(struct kvm_vcpu
*vcpu
, unsigned long cr0
)
1852 void svm_set_cr0(struct kvm_vcpu
*vcpu
, unsigned long cr0
)
1854 struct vcpu_svm
*svm
= to_svm(vcpu
);
1856 bool old_paging
= is_paging(vcpu
);
1858 #ifdef CONFIG_X86_64
1859 if (vcpu
->arch
.efer
& EFER_LME
) {
1860 if (!is_paging(vcpu
) && (cr0
& X86_CR0_PG
)) {
1861 vcpu
->arch
.efer
|= EFER_LMA
;
1862 if (!vcpu
->arch
.guest_state_protected
)
1863 svm
->vmcb
->save
.efer
|= EFER_LMA
| EFER_LME
;
1866 if (is_paging(vcpu
) && !(cr0
& X86_CR0_PG
)) {
1867 vcpu
->arch
.efer
&= ~EFER_LMA
;
1868 if (!vcpu
->arch
.guest_state_protected
)
1869 svm
->vmcb
->save
.efer
&= ~(EFER_LMA
| EFER_LME
);
1873 vcpu
->arch
.cr0
= cr0
;
1876 hcr0
|= X86_CR0_PG
| X86_CR0_WP
;
1877 if (old_paging
!= is_paging(vcpu
))
1878 svm_set_cr4(vcpu
, kvm_read_cr4(vcpu
));
1882 * re-enable caching here because the QEMU bios
1883 * does not do it - this results in some delay at
1886 if (kvm_check_has_quirk(vcpu
->kvm
, KVM_X86_QUIRK_CD_NW_CLEARED
))
1887 hcr0
&= ~(X86_CR0_CD
| X86_CR0_NW
);
1889 svm
->vmcb
->save
.cr0
= hcr0
;
1890 vmcb_mark_dirty(svm
->vmcb
, VMCB_CR
);
1893 * SEV-ES guests must always keep the CR intercepts cleared. CR
1894 * tracking is done using the CR write traps.
1896 if (sev_es_guest(vcpu
->kvm
))
1900 /* Selective CR0 write remains on. */
1901 svm_clr_intercept(svm
, INTERCEPT_CR0_READ
);
1902 svm_clr_intercept(svm
, INTERCEPT_CR0_WRITE
);
1904 svm_set_intercept(svm
, INTERCEPT_CR0_READ
);
1905 svm_set_intercept(svm
, INTERCEPT_CR0_WRITE
);
1909 static bool svm_is_valid_cr4(struct kvm_vcpu
*vcpu
, unsigned long cr4
)
1914 void svm_set_cr4(struct kvm_vcpu
*vcpu
, unsigned long cr4
)
1916 unsigned long host_cr4_mce
= cr4_read_shadow() & X86_CR4_MCE
;
1917 unsigned long old_cr4
= vcpu
->arch
.cr4
;
1919 if (npt_enabled
&& ((old_cr4
^ cr4
) & X86_CR4_PGE
))
1920 svm_flush_tlb_current(vcpu
);
1922 vcpu
->arch
.cr4
= cr4
;
1926 if (!is_paging(vcpu
))
1927 cr4
&= ~(X86_CR4_SMEP
| X86_CR4_SMAP
| X86_CR4_PKE
);
1929 cr4
|= host_cr4_mce
;
1930 to_svm(vcpu
)->vmcb
->save
.cr4
= cr4
;
1931 vmcb_mark_dirty(to_svm(vcpu
)->vmcb
, VMCB_CR
);
1933 if ((cr4
^ old_cr4
) & (X86_CR4_OSXSAVE
| X86_CR4_PKE
))
1934 kvm_update_cpuid_runtime(vcpu
);
1937 static void svm_set_segment(struct kvm_vcpu
*vcpu
,
1938 struct kvm_segment
*var
, int seg
)
1940 struct vcpu_svm
*svm
= to_svm(vcpu
);
1941 struct vmcb_seg
*s
= svm_seg(vcpu
, seg
);
1943 s
->base
= var
->base
;
1944 s
->limit
= var
->limit
;
1945 s
->selector
= var
->selector
;
1946 s
->attrib
= (var
->type
& SVM_SELECTOR_TYPE_MASK
);
1947 s
->attrib
|= (var
->s
& 1) << SVM_SELECTOR_S_SHIFT
;
1948 s
->attrib
|= (var
->dpl
& 3) << SVM_SELECTOR_DPL_SHIFT
;
1949 s
->attrib
|= ((var
->present
& 1) && !var
->unusable
) << SVM_SELECTOR_P_SHIFT
;
1950 s
->attrib
|= (var
->avl
& 1) << SVM_SELECTOR_AVL_SHIFT
;
1951 s
->attrib
|= (var
->l
& 1) << SVM_SELECTOR_L_SHIFT
;
1952 s
->attrib
|= (var
->db
& 1) << SVM_SELECTOR_DB_SHIFT
;
1953 s
->attrib
|= (var
->g
& 1) << SVM_SELECTOR_G_SHIFT
;
1956 * This is always accurate, except if SYSRET returned to a segment
1957 * with SS.DPL != 3. Intel does not have this quirk, and always
1958 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1959 * would entail passing the CPL to userspace and back.
1961 if (seg
== VCPU_SREG_SS
)
1962 /* This is symmetric with svm_get_segment() */
1963 svm
->vmcb
->save
.cpl
= (var
->dpl
& 3);
1965 vmcb_mark_dirty(svm
->vmcb
, VMCB_SEG
);
1968 static void svm_update_exception_bitmap(struct kvm_vcpu
*vcpu
)
1970 struct vcpu_svm
*svm
= to_svm(vcpu
);
1972 clr_exception_intercept(svm
, BP_VECTOR
);
1974 if (vcpu
->guest_debug
& KVM_GUESTDBG_ENABLE
) {
1975 if (vcpu
->guest_debug
& KVM_GUESTDBG_USE_SW_BP
)
1976 set_exception_intercept(svm
, BP_VECTOR
);
1980 static void new_asid(struct vcpu_svm
*svm
, struct svm_cpu_data
*sd
)
1982 if (sd
->next_asid
> sd
->max_asid
) {
1983 ++sd
->asid_generation
;
1984 sd
->next_asid
= sd
->min_asid
;
1985 svm
->vmcb
->control
.tlb_ctl
= TLB_CONTROL_FLUSH_ALL_ASID
;
1986 vmcb_mark_dirty(svm
->vmcb
, VMCB_ASID
);
1989 svm
->current_vmcb
->asid_generation
= sd
->asid_generation
;
1990 svm
->asid
= sd
->next_asid
++;
1993 static void svm_set_dr6(struct vcpu_svm
*svm
, unsigned long value
)
1995 struct vmcb
*vmcb
= svm
->vmcb
;
1997 if (svm
->vcpu
.arch
.guest_state_protected
)
2000 if (unlikely(value
!= vmcb
->save
.dr6
)) {
2001 vmcb
->save
.dr6
= value
;
2002 vmcb_mark_dirty(vmcb
, VMCB_DR
);
2006 static void svm_sync_dirty_debug_regs(struct kvm_vcpu
*vcpu
)
2008 struct vcpu_svm
*svm
= to_svm(vcpu
);
2010 if (WARN_ON_ONCE(sev_es_guest(vcpu
->kvm
)))
2013 get_debugreg(vcpu
->arch
.db
[0], 0);
2014 get_debugreg(vcpu
->arch
.db
[1], 1);
2015 get_debugreg(vcpu
->arch
.db
[2], 2);
2016 get_debugreg(vcpu
->arch
.db
[3], 3);
2018 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
2019 * because db_interception might need it. We can do it before vmentry.
2021 vcpu
->arch
.dr6
= svm
->vmcb
->save
.dr6
;
2022 vcpu
->arch
.dr7
= svm
->vmcb
->save
.dr7
;
2023 vcpu
->arch
.switch_db_regs
&= ~KVM_DEBUGREG_WONT_EXIT
;
2024 set_dr_intercepts(svm
);
2027 static void svm_set_dr7(struct kvm_vcpu
*vcpu
, unsigned long value
)
2029 struct vcpu_svm
*svm
= to_svm(vcpu
);
2031 if (vcpu
->arch
.guest_state_protected
)
2034 svm
->vmcb
->save
.dr7
= value
;
2035 vmcb_mark_dirty(svm
->vmcb
, VMCB_DR
);
2038 static int pf_interception(struct kvm_vcpu
*vcpu
)
2040 struct vcpu_svm
*svm
= to_svm(vcpu
);
2042 u64 fault_address
= svm
->vmcb
->control
.exit_info_2
;
2043 u64 error_code
= svm
->vmcb
->control
.exit_info_1
;
2045 return kvm_handle_page_fault(vcpu
, error_code
, fault_address
,
2046 static_cpu_has(X86_FEATURE_DECODEASSISTS
) ?
2047 svm
->vmcb
->control
.insn_bytes
: NULL
,
2048 svm
->vmcb
->control
.insn_len
);
static int npf_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u64 fault_address = svm->vmcb->control.exit_info_2;
	u64 error_code = svm->vmcb->control.exit_info_1;

	trace_kvm_page_fault(vcpu, fault_address, error_code);
	return kvm_mmu_page_fault(vcpu, fault_address, error_code,
			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);
}
static int db_interception(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(vcpu->guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
	    !svm->nmi_singlestep) {
		u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
		kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
		return 1;
	}

	if (svm->nmi_singlestep) {
		disable_nmi_singlestep(svm);
		/* Make sure we check for pending NMIs upon entry */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	if (vcpu->guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}
static int bp_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}
static int ud_interception(struct kvm_vcpu *vcpu)
{
	return handle_ud(vcpu);
}

static int ac_interception(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
	return 1;
}
static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low    = lower_32_bits(value);
		high   = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

static void svm_handle_mce(struct kvm_vcpu *vcpu)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	kvm_machine_check();
}
static int mc_interception(struct kvm_vcpu *vcpu)
{
	return 1;
}

static int shutdown_interception(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * VMCB is undefined after a SHUTDOWN intercept.  INIT the vCPU to put
	 * the VMCB in a known good state.  Unfortunately, KVM doesn't have
	 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
	 * userspace.  At a platform view, INIT is acceptable behavior as
	 * there exist bare metal platforms that automatically INIT the CPU
	 * in response to shutdown.
	 *
	 * The VM save area for SEV-ES guests has already been encrypted so it
	 * cannot be reinitialized, i.e. synthesizing INIT is futile.
	 */
	if (!sev_es_guest(vcpu->kvm)) {
		clear_page(svm->vmcb);
		kvm_vcpu_reset(vcpu, true);
	}

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
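/*
 * For IOIO intercepts, exit_info_1 encodes the access: the string and
 * direction bits tested below, the operand size field, and the port number
 * in bits 31:16.  exit_info_2 holds the rIP of the instruction following the
 * IN/OUT, which is used as next_rip for fast PIO completion.
 */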
static int io_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++vcpu->stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

	if (string) {
		if (sev_es_guest(vcpu->kvm))
			return sev_es_string_io(svm, size, port, in);
		else
			return kvm_emulate_instruction(vcpu, 0);
	}

	svm->next_rip = svm->vmcb->control.exit_info_2;

	return kvm_fast_pio(vcpu, size, port, in);
}
static int nmi_interception(struct kvm_vcpu *vcpu)
{
	return 1;
}

static int smi_interception(struct kvm_vcpu *vcpu)
{
	return 1;
}

static int intr_interception(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.irq_exits;
	return 1;
}
static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	int ret;

	if (nested_svm_check_permissions(vcpu))
		return 1;

	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
	if (ret) {
		if (ret == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	ret = kvm_skip_emulated_instruction(vcpu);

	if (vmload) {
		svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
		svm->sysenter_eip_hi = 0;
		svm->sysenter_esp_hi = 0;
	} else {
		svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
	}

	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

static int vmload_interception(struct kvm_vcpu *vcpu)
{
	return vmload_vmsave_interception(vcpu, true);
}

static int vmsave_interception(struct kvm_vcpu *vcpu)
{
	return vmload_vmsave_interception(vcpu, false);
}

static int vmrun_interception(struct kvm_vcpu *vcpu)
{
	if (nested_svm_check_permissions(vcpu))
		return 1;

	return nested_svm_vmrun(vcpu);
}
enum {
	NONE_SVM_INSTR,
	SVM_INSTR_VMRUN,
	SVM_INSTR_VMLOAD,
	SVM_INSTR_VMSAVE,
};

/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
static int svm_instr_opcode(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
		return NONE_SVM_INSTR;

	switch (ctxt->modrm) {
	case 0xd8: /* VMRUN */
		return SVM_INSTR_VMRUN;
	case 0xda: /* VMLOAD */
		return SVM_INSTR_VMLOAD;
	case 0xdb: /* VMSAVE */
		return SVM_INSTR_VMSAVE;
	default:
		break;
	}

	return NONE_SVM_INSTR;
}

static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
{
	const int guest_mode_exit_codes[] = {
		[SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
		[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
		[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
	};
	int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
		[SVM_INSTR_VMRUN] = vmrun_interception,
		[SVM_INSTR_VMLOAD] = vmload_interception,
		[SVM_INSTR_VMSAVE] = vmsave_interception,
	};
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	if (is_guest_mode(vcpu)) {
		/* Returns '1' or -errno on failure, '0' on success. */
		ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
		if (ret)
			return ret;
		return 1;
	}
	return svm_instr_handlers[opcode](vcpu);
}
/*
 * #GP handling code. Note that #GP can be triggered under the following two
 * cases:
 *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
 *      some AMD CPUs when EAX of these instructions are in the reserved memory
 *      regions (e.g. SMM memory on host).
 *   2) VMware backdoor
 */
static int gp_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 error_code = svm->vmcb->control.exit_info_1;
	int opcode;

	/* Both #GP cases have zero error_code */
	if (error_code)
		goto reinject;

	/* Decode the instruction for usage later */
	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
		goto reinject;

	opcode = svm_instr_opcode(vcpu);

	if (opcode == NONE_SVM_INSTR) {
		if (!enable_vmware_backdoor)
			goto reinject;

		/*
		 * VMware backdoor emulation on #GP interception only handles
		 * IN{S}, OUT{S}, and RDPMC.
		 */
		if (!is_guest_mode(vcpu))
			return kvm_emulate_instruction(vcpu,
				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
	} else {
		/* All SVM instructions expect page aligned RAX */
		if (svm->vmcb->save.rax & ~PAGE_MASK)
			goto reinject;

		return emulate_svm_instr(vcpu, opcode);
	}

reinject:
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
	return 1;
}
void svm_set_gif(struct vcpu_svm *svm, bool value)
{
	if (value) {
		/*
		 * If VGIF is enabled, the STGI intercept is only added to
		 * detect the opening of the SMI/NMI window; remove it now.
		 * Likewise, clear the VINTR intercept, we will set it
		 * again while processing KVM_REQ_EVENT if needed.
		 */
		if (vgif)
			svm_clr_intercept(svm, INTERCEPT_STGI);
		if (svm_is_intercept(svm, INTERCEPT_VINTR))
			svm_clear_vintr(svm);

		enable_gif(svm);
		if (svm->vcpu.arch.smi_pending ||
		    svm->vcpu.arch.nmi_pending ||
		    kvm_cpu_has_injectable_intr(&svm->vcpu) ||
		    kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	} else {
		disable_gif(svm);

		/*
		 * After a CLGI no interrupts should come.  But if vGIF is
		 * in use, we still rely on the VINTR intercept (rather than
		 * STGI) to detect an open interrupt window.
		 */
		if (!vgif)
			svm_clear_vintr(svm);
	}
}
static int stgi_interception(struct kvm_vcpu *vcpu)
{
	int ret;

	if (nested_svm_check_permissions(vcpu))
		return 1;

	ret = kvm_skip_emulated_instruction(vcpu);
	svm_set_gif(to_svm(vcpu), true);
	return ret;
}

static int clgi_interception(struct kvm_vcpu *vcpu)
{
	int ret;

	if (nested_svm_check_permissions(vcpu))
		return 1;

	ret = kvm_skip_emulated_instruction(vcpu);
	svm_set_gif(to_svm(vcpu), false);
	return ret;
}
static int invlpga_interception(struct kvm_vcpu *vcpu)
{
	gva_t gva = kvm_rax_read(vcpu);
	u32 asid = kvm_rcx_read(vcpu);

	/* FIXME: Handle an address size prefix. */
	if (!is_long_mode(vcpu))
		gva = (u32)gva;

	trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, gva);

	return kvm_skip_emulated_instruction(vcpu);
}

static int skinit_interception(struct kvm_vcpu *vcpu)
{
	trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));

	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
static int task_switch_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			vcpu->arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
		case SVM_EXITINTINFO_TYPE_SOFT:
			kvm_clear_interrupt_queue(vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
		if (!svm_skip_emulated_instruction(vcpu))
			return 0;
	}

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
			       has_error_code, error_code);
}
static void svm_clr_iret_intercept(struct vcpu_svm *svm)
{
	if (!sev_es_guest(svm->vcpu.kvm))
		svm_clr_intercept(svm, INTERCEPT_IRET);
}

static void svm_set_iret_intercept(struct vcpu_svm *svm)
{
	if (!sev_es_guest(svm->vcpu.kvm))
		svm_set_intercept(svm, INTERCEPT_IRET);
}

static int iret_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON_ONCE(sev_es_guest(vcpu->kvm));

	++vcpu->stat.nmi_window_exits;
	svm->awaiting_iret_completion = true;

	svm_clr_iret_intercept(svm);
	svm->nmi_iret_rip = kvm_rip_read(vcpu);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 1;
}
static int invlpg_interception(struct kvm_vcpu *vcpu)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return kvm_emulate_instruction(vcpu, 0);

	kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
	return kvm_skip_emulated_instruction(vcpu);
}

static int emulate_on_interception(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_instruction(vcpu, 0);
}

static int rsm_interception(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
}
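/*
 * Selective CR0 write intercepts (INTERCEPT_SELECTIVE_CR0) only fire for
 * writes that change bits outside SVM_CR0_SELECTIVE_MASK, so both the old
 * and new CR0 values are masked before being compared below.
 */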
static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
					    unsigned long val)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long cr0 = vcpu->arch.cr0;
	bool ret = false;

	if (!is_guest_mode(vcpu) ||
	    (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}
#define CR_VALID (1ULL << 63)

static int cr_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(vcpu);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(vcpu);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
		cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
	else
		cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(vcpu, val))
				err = kvm_set_cr0(vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(vcpu);
			break;
		case 2:
			val = vcpu->arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(vcpu);
			break;
		case 4:
			val = kvm_read_cr4(vcpu);
			break;
		case 8:
			val = kvm_get_cr8(vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(vcpu, reg, val);
		trace_kvm_cr_read(cr, val);
	}
	return kvm_complete_insn_gp(vcpu, err);
}
static int cr_trap(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long old_value, new_value;
	unsigned long cr;
	int ret = 0;

	new_value = (unsigned long)svm->vmcb->control.exit_info_1;

	cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
	switch (cr) {
	case 0:
		old_value = kvm_read_cr0(vcpu);
		svm_set_cr0(vcpu, new_value);

		kvm_post_set_cr0(vcpu, old_value, new_value);
		break;
	case 4:
		old_value = kvm_read_cr4(vcpu);
		svm_set_cr4(vcpu, new_value);

		kvm_post_set_cr4(vcpu, old_value, new_value);
		break;
	case 8:
		ret = kvm_set_cr8(vcpu, new_value);
		break;
	default:
		WARN(1, "unhandled CR%d write trap", cr);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	return kvm_complete_insn_gp(vcpu, ret);
}
static int dr_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int reg, dr;
	unsigned long val;
	int err = 0;

	/*
	 * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT
	 * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early.
	 */
	if (sev_es_guest(vcpu->kvm))
		return 1;

	if (vcpu->guest_debug == 0) {
		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		clr_dr_intercepts(svm);
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(vcpu);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
	if (dr >= 16) { /* mov to DRn */
		dr -= 16;
		val = kvm_register_read(vcpu, reg);
		err = kvm_set_dr(vcpu, dr, val);
	} else {
		kvm_get_dr(vcpu, dr, &val);
		kvm_register_write(vcpu, reg, val);
	}

	return kvm_complete_insn_gp(vcpu, err);
}
static int cr8_write_interception(struct kvm_vcpu *vcpu)
{
	int r;

	u8 cr8_prev = kvm_get_cr8(vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(vcpu);
	if (lapic_in_kernel(vcpu))
		return r;
	if (cr8_prev <= kvm_get_cr8(vcpu))
		return r;
	vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}
static int efer_trap(struct kvm_vcpu *vcpu)
{
	struct msr_data msr_info;
	int ret;

	/*
	 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
	 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
	 * whether the guest has X86_FEATURE_SVM - this avoids a failure if
	 * the guest doesn't have X86_FEATURE_SVM.
	 */
	msr_info.host_initiated = false;
	msr_info.index = MSR_EFER;
	msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
	ret = kvm_set_msr_common(vcpu, &msr_info);

	return kvm_complete_insn_gp(vcpu, ret);
}
static int svm_get_msr_feature(struct kvm_msr_entry *msr)
{
	msr->data = 0;

	switch (msr->index) {
	case MSR_AMD64_DE_CFG:
		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
			msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
		break;
	default:
		return KVM_MSR_RET_INVALID;
	}

	return 0;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (msr_info->index) {
	case MSR_AMD64_TSC_RATIO:
		if (!msr_info->host_initiated &&
		    !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR))
			return 1;
		msr_info->data = svm->tsc_ratio_msr;
		break;
	case MSR_STAR:
		msr_info->data = svm->vmcb01.ptr->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		msr_info->data = svm->vmcb01.ptr->save.lstar;
		break;
	case MSR_CSTAR:
		msr_info->data = svm->vmcb01.ptr->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		msr_info->data = svm->vmcb01.ptr->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
		if (guest_cpuid_is_intel(vcpu))
			msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
		if (guest_cpuid_is_intel(vcpu))
			msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
		break;
	case MSR_TSC_AUX:
		msr_info->data = svm->tsc_aux;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		msr_info->data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		msr_info->data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			msr_info->data = svm->vmcb->save.spec_ctrl;
		else
			msr_info->data = svm->spec_ctrl;
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		msr_info->data = svm->virt_spec_ctrl;
		break;
	case MSR_F15H_IC_CFG: {

		int family, model;

		family = guest_cpuid_family(vcpu);
		model  = guest_cpuid_model(vcpu);

		if (family < 0 || model < 0)
			return kvm_get_msr_common(vcpu, msr_info);

		msr_info->data = 0;

		if (family == 0x15 &&
		    (model >= 0x2 && model < 0x20))
			msr_info->data = 0x1E;
		}
		break;
	case MSR_AMD64_DE_CFG:
		msr_info->data = svm->msr_decfg;
		break;
	default:
		return kvm_get_msr_common(vcpu, msr_info);
	}
	return 0;
}
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
		return kvm_complete_insn_gp(vcpu, err);

	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
				X86_TRAP_GP |
				SVM_EVTINJ_TYPE_EXEPT |
				SVM_EVTINJ_VALID);
	return 1;
}
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret = 0;

	u32 ecx = msr->index;
	u64 data = msr->data;
	switch (ecx) {
	case MSR_AMD64_TSC_RATIO:

		if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) {

			if (!msr->host_initiated)
				return 1;
			/*
			 * In case TSC scaling is not enabled, always
			 * leave this MSR at the default value.
			 *
			 * Due to bug in qemu 6.2.0, it would try to set
			 * this msr to 0 if tsc scaling is not enabled.
			 * Ignore this value as well.
			 */
			if (data != 0 && data != svm->tsc_ratio_msr)
				return 1;
			break;
		}

		if (data & SVM_TSC_RATIO_RSVD)
			return 1;

		svm->tsc_ratio_msr = data;

		if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
		    is_guest_mode(vcpu))
			nested_svm_update_tsc_ratio_msr(vcpu);

		break;
	case MSR_IA32_CR_PAT:
		ret = kvm_set_msr_common(vcpu, msr);
		if (ret)
			break;

		svm->vmcb01.ptr->save.g_pat = data;
		if (is_guest_mode(vcpu))
			nested_vmcb02_compute_g_pat(svm);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (kvm_spec_ctrl_test_value(data))
			return 1;

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			svm->vmcb->save.spec_ctrl = data;
		else
			svm->spec_ctrl = data;
		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_svm_vmrun_msrpm.
		 * We update the L1 MSR bit as well since it will end up
		 * touching the MSR anyway now.
		 */
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		if (data & ~SPEC_CTRL_SSBD)
			return 1;

		svm->virt_spec_ctrl = data;
		break;
	case MSR_STAR:
		svm->vmcb01.ptr->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb01.ptr->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb01.ptr->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb01.ptr->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb01.ptr->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb01.ptr->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
		/*
		 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs
		 * when we spoof an Intel vendor ID (for cross vendor migration).
		 * In this case we use this intercept to track the high
		 * 32 bit part of these msrs to support Intel's
		 * implementation of SYSENTER/SYSEXIT.
		 */
		svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_TSC_AUX:
		/*
		 * TSC_AUX is always virtualized for SEV-ES guests when the
		 * feature is available. The user return MSR support is not
		 * required in this case because TSC_AUX is restored on #VMEXIT
		 * from the host save area (which has been initialized in
		 * svm_hardware_enable()).
		 */
		if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
			break;

		/*
		 * TSC_AUX is usually changed only during boot and never read
		 * directly.  Intercept TSC_AUX instead of exposing it to the
		 * guest via direct_access_msrs, and switch it via user return.
		 */
		preempt_disable();
		ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
		preempt_enable();
		if (ret)
			break;

		svm->tsc_aux = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!lbrv) {
			kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm_get_lbr_vmcb(svm)->save.dbgctl = data;
		svm_update_lbrv(vcpu);
		break;
	case MSR_VM_HSAVE_PA:
		/*
		 * Old kernels did not validate the value written to
		 * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
		 * value to allow live migrating buggy or malicious guests
		 * originating from those kernels.
		 */
		if (!msr->host_initiated && !page_address_valid(vcpu, data))
			return 1;

		svm->nested.hsave_msr = data & PAGE_MASK;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
		break;
	case MSR_AMD64_DE_CFG: {
		struct kvm_msr_entry msr_entry;

		msr_entry.index = msr->index;
		if (svm_get_msr_feature(&msr_entry))
			return 1;

		/* Check the supported bits */
		if (data & ~msr_entry.data)
			return 1;

		/* Don't allow the guest to change a bit, #GP */
		if (!msr->host_initiated && (data ^ msr_entry.data))
			return 1;

		svm->msr_decfg = data;
		break;
	}
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return ret;
}
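/*
 * For SVM_EXIT_MSR, exit_info_1 distinguishes the access type: zero for
 * RDMSR and non-zero for WRMSR, hence the branch below.
 */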
static int msr_interception(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_info_1)
		return kvm_emulate_wrmsr(vcpu);
	else
		return kvm_emulate_rdmsr(vcpu);
}
static int interrupt_window_interception(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	svm_clear_vintr(to_svm(vcpu));

	/*
	 * If not running nested, for AVIC, the only reason to end up here is ExtINTs.
	 * In this case AVIC was temporarily disabled for
	 * requesting the IRQ window and we have to re-enable it.
	 *
	 * If running nested, still remove the VM wide AVIC inhibit to
	 * support case in which the interrupt window was requested when the
	 * vCPU was not running nested.
	 *
	 * All vCPUs which run still run nested, will remain to have their
	 * AVIC still inhibited due to per-vCPU AVIC inhibition.
	 */
	kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

	++vcpu->stat.irq_window_exits;
	return 1;
}
static int pause_interception(struct kvm_vcpu *vcpu)
{
	bool in_kernel;

	/*
	 * CPL is not made available for an SEV-ES guest, therefore
	 * vcpu->arch.preempted_in_kernel can never be true.  Just
	 * set in_kernel to false as well.
	 */
	in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;

	grow_ple_window(vcpu);

	kvm_vcpu_on_spin(vcpu, in_kernel);
	return kvm_skip_emulated_instruction(vcpu);
}
static int invpcid_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long type;
	gva_t gva;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/*
	 * For an INVPCID intercept:
	 * EXITINFO1 provides the linear address of the memory operand.
	 * EXITINFO2 provides the contents of the register operand.
	 */
	type = svm->vmcb->control.exit_info_2;
	gva = svm->vmcb->control.exit_info_1;

	return kvm_handle_invpcid(vcpu, type, gva);
}
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
	[SVM_EXIT_EXCP_BASE + GP_VECTOR]	= gp_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= smi_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= kvm_emulate_rdpmc,
	[SVM_EXIT_CPUID]			= kvm_emulate_cpuid,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= kvm_emulate_invd,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= kvm_emulate_halt,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= kvm_emulate_hypercall,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_RDTSCP]			= kvm_handle_invalid_op,
	[SVM_EXIT_WBINVD]			= kvm_emulate_wbinvd,
	[SVM_EXIT_MONITOR]			= kvm_emulate_monitor,
	[SVM_EXIT_MWAIT]			= kvm_emulate_mwait,
	[SVM_EXIT_XSETBV]			= kvm_emulate_xsetbv,
	[SVM_EXIT_RDPRU]			= kvm_handle_invalid_op,
	[SVM_EXIT_EFER_WRITE_TRAP]		= efer_trap,
	[SVM_EXIT_CR0_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR4_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR8_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_INVPCID]			= invpcid_interception,
	[SVM_EXIT_NPF]				= npf_interception,
	[SVM_EXIT_RSM]				= rsm_interception,
	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
	[SVM_EXIT_VMGEXIT]			= sev_handle_vmgexit,
};
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;
	struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;

	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
	       svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
	pr_err("%-20s%08x %08x\n", "intercepts:",
	       control->intercepts[INTERCEPT_WORD3],
	       control->intercepts[INTERCEPT_WORD4]);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%d\n", "pause filter threshold:",
	       control->pause_filter_thresh);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
	pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:", save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:", save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:", save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:", save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:", save01->fs.selector, save01->fs.attrib,
	       save01->fs.limit, save01->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:", save01->gs.selector, save01->gs.attrib,
	       save01->gs.limit, save01->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:", save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:", save01->ldtr.selector, save01->ldtr.attrib,
	       save01->ldtr.limit, save01->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:", save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:", save01->tr.selector, save01->tr.attrib,
	       save01->tr.limit, save01->tr.base);
	pr_err("vmpl: %d cpl: %d efer: %016llx\n",
	       save->vmpl, save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save01->star, "lstar:", save01->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save01->cstar, "sfmask:", save01->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save01->kernel_gs_base,
	       "sysenter_cs:", save01->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save01->sysenter_esp,
	       "sysenter_eip:", save01->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}
static bool svm_check_exit_valid(u64 exit_code)
{
	return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
		svm_exit_handlers[exit_code]);
}

static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
{
	vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
	dump_vmcb(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;

	return 0;
}
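/*
 * With retpolines enabled, the indirect call through svm_exit_handlers[] is
 * comparatively expensive, so the hottest exit reasons are dispatched
 * directly below before falling back to the handler table.
 */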
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
{
	if (!svm_check_exit_valid(exit_code))
		return svm_handle_invalid_exit(vcpu, exit_code);

#ifdef CONFIG_RETPOLINE
	if (exit_code == SVM_EXIT_MSR)
		return msr_interception(vcpu);
	else if (exit_code == SVM_EXIT_VINTR)
		return interrupt_window_interception(vcpu);
	else if (exit_code == SVM_EXIT_INTR)
		return intr_interception(vcpu);
	else if (exit_code == SVM_EXIT_HLT)
		return kvm_emulate_halt(vcpu);
	else if (exit_code == SVM_EXIT_NPF)
		return npf_interception(vcpu);
#endif
	return svm_exit_handlers[exit_code](vcpu);
}
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
			      u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*reason = control->exit_code;
	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
	*intr_info = control->exit_int_info;
	if ((*intr_info & SVM_EXITINTINFO_VALID) &&
	    (*intr_info & SVM_EXITINTINFO_VALID_ERR))
		*error_code = control->exit_int_info_err;
	else
		*error_code = 0;
}
static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	/* SEV-ES guests must use the CR write traps to track CR registers. */
	if (!sev_es_guest(vcpu->kvm)) {
		if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
			vcpu->arch.cr0 = svm->vmcb->save.cr0;
		if (npt_enabled)
			vcpu->arch.cr3 = svm->vmcb->save.cr3;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
		dump_vmcb(vcpu);
		return 0;
	}

	if (exit_fastpath != EXIT_FASTPATH_NONE)
		return 1;

	return svm_invoke_exit_handler(vcpu, exit_code);
}
static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If the previous vmrun of the vmcb occurred on a different physical
	 * cpu, then mark the vmcb dirty and assign a new asid.  Hardware's
	 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
	 */
	if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
		svm->current_vmcb->asid_generation = 0;
		vmcb_mark_all_dirty(svm->vmcb);
		svm->current_vmcb->cpu = vcpu->cpu;
	}

	if (sev_guest(vcpu->kvm))
		return pre_sev_run(svm, vcpu->cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->current_vmcb->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	if (svm->nmi_l1_to_l2)
		return;

	svm->nmi_masked = true;
	svm_set_iret_intercept(svm);
	++vcpu->stat.nmi_injections;
}
static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!is_vnmi_enabled(svm))
		return false;

	return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
}

static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!is_vnmi_enabled(svm))
		return false;

	if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
		return false;

	svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);

	/*
	 * Because the pending NMI is serviced by hardware, KVM can't know when
	 * the NMI is "injected", but for all intents and purposes, passing the
	 * NMI off to hardware counts as injection.
	 */
	++vcpu->stat.nmi_injections;

	return true;
}
static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 type;

	if (vcpu->arch.interrupt.soft) {
		if (svm_update_soft_interrupt_rip(vcpu))
			return;

		type = SVM_EVTINJ_TYPE_SOFT;
	} else {
		type = SVM_EVTINJ_TYPE_INTR;
	}

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
			   vcpu->arch.interrupt.soft, reinjected);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
				       SVM_EVTINJ_VALID | type;
}
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vector)
{
	/*
	 * apic->apicv_active must be read after vcpu->mode.
	 * Pairs with smp_store_release in vcpu_enter_guest.
	 */
	bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);

	/* Note, this is called iff the local APIC is in-kernel. */
	if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
		/* Process the interrupt via kvm_check_and_inject_events(). */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		return;
	}

	trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
	if (in_guest_mode) {
		/*
		 * Signal the doorbell to tell hardware to inject the IRQ.  If
		 * the vCPU exits the guest before the doorbell chimes, hardware
		 * will automatically process AVIC interrupts at the next VMRUN.
		 */
		avic_ring_doorbell(vcpu);
	} else {
		/*
		 * Wake the vCPU if it was blocking.  KVM will then detect the
		 * pending IRQ when checking if the vCPU has a wake event.
		 */
		kvm_vcpu_wake_up(vcpu);
	}
}
static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
				  int trig_mode, int vector)
{
	kvm_lapic_set_irr(vector, apic);

	/*
	 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
	 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
	 * the read of guest_mode.  This guarantees that either VMRUN will see
	 * and process the new vIRR entry, or that svm_complete_interrupt_delivery
	 * will signal the doorbell if the CPU has already entered the guest.
	 */
	smp_mb__after_atomic();
	svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
}
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
	if (sev_es_guest(vcpu->kvm))
		return;

	if (nested_svm_virtualize_tpr(vcpu))
		return;

	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	if (tpr >= irr)
		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_vnmi_enabled(svm))
		return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
	else
		return svm->nmi_masked;
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_vnmi_enabled(svm)) {
		if (masked)
			svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
		else
			svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;

	} else {
		svm->nmi_masked = masked;
		if (masked)
			svm_set_iret_intercept(svm);
		else
			svm_clr_iret_intercept(svm);
	}
}
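/*
 * NMI masking lives in one of two places: with vNMI, the blocking state is
 * tracked by hardware in int_ctl (V_NMI_BLOCKING_MASK); otherwise KVM tracks
 * it in software via nmi_masked and the IRET intercept, as above.
 */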
bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (!gif_set(svm))
		return true;

	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return false;

	if (svm_get_nmi_mask(vcpu))
		return true;

	return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_nmi_blocked(vcpu))
		return 0;

	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return -EBUSY;
	return 1;
}
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (!gif_set(svm))
		return true;

	if (is_guest_mode(vcpu)) {
		/* As long as interrupts are being delivered...  */
		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
		    ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
			return true;

		/* ... vmexits aren't blocked by the interrupt shadow  */
		if (nested_exit_on_intr(svm))
			return false;
	} else {
		if (!svm_get_if_flag(vcpu))
			return true;
	}

	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_interrupt_blocked(vcpu))
		return 0;

	/*
	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
	 * e.g. if the IRQ arrived asynchronously after checking nested events.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
		return -EBUSY;

	return 1;
}
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept. However, if the vGIF feature is
	 * enabled, the STGI interception will not occur. Enable the irq
	 * window under the assumption that the hardware will set the GIF.
	 */
	if (vgif || gif_set(svm)) {
		/*
		 * IRQ window is not needed when AVIC is enabled,
		 * unless we have pending ExtINT since it cannot be injected
		 * via AVIC. In such case, KVM needs to temporarily disable AVIC,
		 * and fallback to injecting IRQ via V_IRQ.
		 *
		 * If running nested, AVIC is already locally inhibited
		 * on this vCPU, therefore there is no need to request
		 * the VM wide AVIC inhibition.
		 */
		if (!is_guest_mode(vcpu))
			kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

		svm_set_vintr(svm);
	}
}
static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * KVM should never request an NMI window when vNMI is enabled, as KVM
	 * allows at most one to-be-injected NMI and one pending NMI, i.e. if
	 * two NMIs arrive simultaneously, KVM will inject one and set
	 * V_NMI_PENDING for the other.  WARN, but continue with the standard
	 * single-step approach to try and salvage the pending NMI.
	 */
	WARN_ON_ONCE(is_vnmi_enabled(svm));

	if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
		return; /* IRET will cause a vm exit */

	/*
	 * SEV-ES guests are responsible for signaling when a vCPU is ready to
	 * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
	 * KVM can't intercept and single-step IRET to detect when NMIs are
	 * unblocked (architecturally speaking).  See SVM_VMGEXIT_NMI_COMPLETE.
	 *
	 * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
	 * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
	 * supported NAEs in the GHCB protocol.
	 */
	if (sev_es_guest(vcpu->kvm))
		return;

	if (!gif_set(svm)) {
		if (vgif)
			svm_set_intercept(svm, INTERCEPT_STGI);
		return; /* STGI will cause a vm exit */
	}

	/*
	 * Something prevents NMI from been injected. Single step over possible
	 * problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}
static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
	 * A TLB flush for the current ASID flushes both "host" and "guest" TLB
	 * entries, and thus is a superset of Hyper-V's fine grained flushing.
	 */
	kvm_hv_vcpu_purge_flush_tlb(vcpu);

	/*
	 * Flush only the current ASID even if the TLB flush was invoked via
	 * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
	 * unconditionally does a TLB flush on both nested VM-Enter and nested
	 * VM-Exit (via kvm_mmu_reset_context()).
	 */
	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->current_vmcb->asid_generation--;
}

static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	hpa_t root_tdp = vcpu->arch.mmu->root.hpa;

	/*
	 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
	 * flush the NPT mappings via hypercall as flushing the ASID only
	 * affects virtual to physical mappings, it does not invalidate guest
	 * physical to host physical mappings.
	 */
	if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
		hyperv_flush_guest_mapping(root_tdp);

	svm_flush_tlb_asid(vcpu);
}

static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	/*
	 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
	 * flushes should be routed to hv_flush_remote_tlbs() without requesting
	 * a "regular" remote flush.  Reaching this point means either there's
	 * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
	 * which might be fatal to the guest.  Yell, but try to recover.
	 */
	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
		hv_flush_remote_tlbs(vcpu->kvm);

	svm_flush_tlb_asid(vcpu);
}

static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	invlpga(gva, svm->vmcb->control.asid);
}
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nested_svm_virtualize_tpr(vcpu))
		return;

	if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (nested_svm_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
					int type)
{
	bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
	bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
	 * associated with the original soft exception/interrupt.  next_rip is
	 * cleared on all exits that can occur while vectoring an event, so KVM
	 * needs to manually set next_rip for re-injection.  Unlike the !nrips
	 * case below, this needs to be done if and only if KVM is re-injecting
	 * the same event, i.e. if the event is a soft exception/interrupt,
	 * otherwise next_rip is unused on VMRUN.
	 */
	if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
	    kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
		svm->vmcb->control.next_rip = svm->soft_int_next_rip;
	/*
	 * If NRIPS isn't enabled, KVM must manually advance RIP prior to
	 * injecting the soft exception/interrupt.  That advancement needs to
	 * be unwound if vectoring didn't complete.  Note, the new event may
	 * not be the injected event, e.g. if KVM injected an INTn, the INTn
	 * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
	 * be the reported vectored event, but RIP still needs to be unwound.
	 */
	else if (!nrips && (is_soft || is_exception) &&
		 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
		kvm_rip_write(vcpu, svm->soft_int_old_rip);
}
static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
	bool soft_int_injected = svm->soft_int_injected;

	svm->nmi_l1_to_l2 = false;
	svm->soft_int_injected = false;

	/*
	 * If we've made progress since setting awaiting_iret_completion, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if (svm->awaiting_iret_completion &&
	    kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
		svm->awaiting_iret_completion = false;
		svm->nmi_masked = false;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	if (soft_int_injected)
		svm_complete_soft_interrupt(vcpu, vector, type);

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		vcpu->arch.nmi_injected = true;
		svm->nmi_l1_to_l2 = nmi_l1_to_l2;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * Never re-inject a #VC exception.
		 */
		if (vector == X86_TRAP_VC)
			break;

		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(vcpu, vector, err);
		} else
			kvm_requeue_exception(vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(vcpu, vector, false);
		break;
	case SVM_EXITINTINFO_TYPE_SOFT:
		kvm_queue_interrupt(vcpu, vector, true);
		break;
	default:
		break;
	}
}
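/*
 * Cancelling injection re-materializes the not-yet-delivered event_inj as
 * exit_int_info so that svm_complete_interrupts() re-queues it, exactly as
 * if the event had been intercepted before delivery.
 */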
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(vcpu);
}
static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	return 1;
}

static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
	    to_svm(vcpu)->vmcb->control.exit_info_1)
		return handle_fastpath_set_msr_irqoff(vcpu);

	return EXIT_FASTPATH_NONE;
}
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	guest_state_enter_irqoff();

	amd_clear_divider();

	if (sev_es_guest(vcpu->kvm))
		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
	else
		__svm_vcpu_run(svm, spec_ctrl_intercepted);

	guest_state_exit_irqoff();
}
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);

	trace_kvm_entry(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * Disable singlestep if we're injecting an interrupt/exception.
	 * We don't want our modified rflags to be pushed on the stack where
	 * we might not be able to easily reset them if we disabled NMI
	 * singlestep later.
	 */
	if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
		/*
		 * Event injection happens before external interrupts cause a
		 * vmexit and interrupts are disabled here, so smp_send_reschedule
		 * is enough to force an immediate vmexit.
		 */
		disable_nmi_singlestep(svm);
		smp_send_reschedule(vcpu->cpu);
	}

	pre_svm_run(vcpu);

	sync_lapic_to_cr8(vcpu);

	if (unlikely(svm->asid != svm->vmcb->control.asid)) {
		svm->vmcb->control.asid = svm->asid;
		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
	}
	svm->vmcb->save.cr2 = vcpu->arch.cr2;

	svm_hv_update_vp_id(svm->vmcb, vcpu);

	/*
	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
	 * of a #DB.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
		svm_set_dr6(svm, vcpu->arch.dr6);
	else
		svm_set_dr6(svm, DR6_ACTIVE_LOW);

	clgi();
	kvm_load_guest_xsave_state(vcpu);

	kvm_wait_lapic_expire(vcpu);

	/*
	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
	 * is no need to worry about the conditional branch over the wrmsr
	 * being speculatively taken.
	 */
	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);

	svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);

	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);

	if (!sev_es_guest(vcpu->kvm)) {
		vcpu->arch.cr2 = svm->vmcb->save.cr2;
		vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
		vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
		vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
	}
	vcpu->arch.regs_dirty = 0;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);

	kvm_load_host_xsave_state(vcpu);
	stgi();

	/* Any pending NMI will happen here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_interrupt(vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;
	if (is_guest_mode(vcpu)) {
		nested_sync_control_from_vmcb02(svm);

		/* Track VMRUNs that have made it past consistency checking */
		if (svm->nested.nested_run_pending &&
		    svm->vmcb->control.exit_code != SVM_EXIT_ERR)
			++vcpu->stat.nested_run;

		svm->nested.nested_run_pending = 0;
	}

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	vmcb_mark_all_clean(svm->vmcb);

	/* if exit due to PF check for async PF */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		vcpu->arch.apf.host_apf_flags =
			kvm_read_and_reset_apf_flags();

	vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
	 * change the physical cpu
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(vcpu);

	trace_kvm_exit(vcpu, KVM_ISA_SVM);

	svm_complete_interrupts(vcpu);

	if (is_guest_mode(vcpu))
		return EXIT_FASTPATH_NONE;

	return svm_exit_handlers_fastpath(vcpu);
}

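/*
 * Install a new MMU root.  With NPT the root is programmed into nested_cr3 and
 * guest CR3 is left under guest control; with shadow paging the root (plus the
 * active PCID, if any) is written to the VMCB's save.cr3.
 */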
static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long cr3;

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);

		hv_track_root_tdp(vcpu, root_hpa);

		cr3 = vcpu->arch.cr3;
	} else if (root_level >= PT64_ROOT_4LEVEL) {
		cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
	} else {
		/* PCID in the guest should be impossible with a 32-bit MMU. */
		WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
		cr3 = root_hpa;
	}

	svm->vmcb->save.cr3 = cr3;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

/*
 * The kvm parameter can be NULL (module initialization, or invocation before
 * VM creation). Be sure to check the kvm parameter before using it.
 */
static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
{
	switch (index) {
	case MSR_IA32_MCG_EXT_CTL:
	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
		return false;
	case MSR_IA32_SMBASE:
		if (!IS_ENABLED(CONFIG_KVM_SMM))
			return false;
		/* SEV-ES guests do not support SMM, so report false */
		if (kvm && sev_es_guest(kvm))
			return false;
		break;
	default:
		break;
	}

	return true;
}

static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * SVM doesn't provide a way to disable just XSAVES in the guest, KVM
	 * can only disable all variants by disallowing CR4.OSXSAVE from
	 * being set.  As a result, if the host has XSAVE and XSAVES, and the
	 * guest has XSAVE enabled, the guest can execute XSAVES without
	 * faulting.  Treat XSAVES as enabled in this case regardless of
	 * whether it's advertised to the guest so that KVM context switches
	 * XSS on VM-Enter/VM-Exit.  Failure to do so would effectively give
	 * the guest read/write access to the host's XSS.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVE) &&
	    boot_cpu_has(X86_FEATURE_XSAVES) &&
	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
		kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);

	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);

	/*
	 * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
	 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
	 * SVM on Intel is bonkers and extremely unlikely to work).
	 */
	if (!guest_cpuid_is_intel(vcpu))
		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);

	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);

	svm_recalc_instruction_intercepts(vcpu, svm);

	if (boot_cpu_has(X86_FEATURE_IBPB))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
				     !!guest_has_pred_cmd_msr(vcpu));

	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
				     !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));

	if (sev_guest(vcpu->kvm))
		sev_vcpu_after_set_cpuid(svm);

	init_vmcb_after_set_cpuid(vcpu);
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

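/*
 * Map x86 emulator intercept IDs to SVM exit codes and to the emulation stage
 * (before exception checks, after exception checks, or after the memory
 * access) at which the corresponding intercept must be checked for a nested
 * guest.
 */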
#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_xsetbv]		= PRE_EX(SVM_EXIT_XSETBV),
};

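/*
 * Called by the emulator when an instruction is emulated on behalf of L2:
 * build the exit_code/exit_info the hardware would have generated and ask
 * nested_svm_exit_handled() whether the intercept should be forwarded to L1
 * as a #VMEXIT.
 */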
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		if (!(vmcb12_is_intercept(&svm->nested.ctl,
					  INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * We get this for NOP only, but pause
		 * is REP NOP, so check for it here.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
		vcpu->arch.at_instruction_boundary = true;
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);
}

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* [63:9] are reserved. */
	vcpu->arch.mcg_cap &= 0x1ff;
}

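/* SMI/SMM handling is only built when CONFIG_KVM_SMM is enabled. */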
#ifdef CONFIG_KVM_SMM
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Per APM Vol.2 15.22.2 "Response to SMI" */
	if (!gif_set(svm))
		return true;

	return is_smm(vcpu);
}

static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_smi_blocked(vcpu))
		return 0;

	/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
		return -EBUSY;

	return 1;
}

static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map_save;
	int ret;

	if (!is_guest_mode(vcpu))
		return 0;

	/*
	 * 32-bit SMRAM format doesn't preserve EFER and SVM state.  Userspace is
	 * responsible for ensuring nested SVM and SMIs are mutually exclusive.
	 */

	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 1;

	smram->smram64.svm_guest_flag = 1;
	smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
	if (ret)
		return ret;

	/*
	 * KVM uses VMCB01 to store L1 host state while L2 runs but
	 * VMCB01 is going to be used during SMM and thus the state will
	 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
	 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
	 * format of the area is identical to guest save area offset
	 * by 0x400 (matches the offset of 'struct vmcb_save_area'
	 * within 'struct vmcb'). Note: HSAVE area may also be used by
	 * L1 hypervisor to save additional host context (e.g. KVM does
	 * that, see svm_prepare_switch_to_guest()) which must be
	 * preserved.
	 */
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
		return 1;

	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);

	svm_copy_vmrun_state(map_save.hva + 0x400,
			     &svm->vmcb01.ptr->save);

	kvm_vcpu_unmap(vcpu, &map_save, true);
	return 0;
}

static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map, map_save;
	struct vmcb *vmcb12;
	int ret;

	const struct kvm_smram_state_64 *smram64 = &smram->smram64;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 0;

	/* Non-zero if SMI arrived while vCPU was in guest mode. */
	if (!smram64->svm_guest_flag)
		return 0;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return 1;

	if (!(smram64->efer & EFER_SVME))
		return 1;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
		return 1;

	ret = 1;
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
		goto unmap_map;

	if (svm_allocate_nested(svm))
		goto unmap_save;

	/*
	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
	 * used during SMM (see svm_enter_smm())
	 */

	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);

	/*
	 * Enter the nested guest now
	 */

	vmcb_mark_all_dirty(svm->vmcb01.ptr);

	vmcb12 = map.hva;
	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
	ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);

	if (ret)
		goto unmap_save;

	svm->nested.nested_run_pending = 1;

unmap_save:
	kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
	kvm_vcpu_unmap(vcpu, &map, true);
	return ret;
}

static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!gif_set(svm)) {
		if (vgif)
			svm_set_intercept(svm, INTERCEPT_STGI);
		/* STGI will cause a vm exit */
	} else {
		/* We must be in SMM; RSM will cause a vmexit anyway.  */
	}
}
#endif

static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
					 void *insn, int insn_len)
{
	bool smep, smap, is_user;
	u64 error_code;

	/* Emulation is always possible when KVM has access to all guest state. */
	if (!sev_guest(vcpu->kvm))
		return X86EMUL_CONTINUE;

	/* #UD and #GP should never be intercepted for SEV guests. */
	WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
				  EMULTYPE_TRAP_UD_FORCED |
				  EMULTYPE_VMWARE_GP));

	/*
	 * Emulation is impossible for SEV-ES guests as KVM doesn't have access
	 * to guest register state.
	 */
	if (sev_es_guest(vcpu->kvm))
		return X86EMUL_RETRY_INSTR;

	/*
	 * Emulation is possible if the instruction is already decoded, e.g.
	 * when completing I/O after returning from userspace.
	 */
	if (emul_type & EMULTYPE_NO_DECODE)
		return X86EMUL_CONTINUE;

	/*
	 * Emulation is possible for SEV guests if and only if a prefilled
	 * buffer containing the bytes of the intercepted instruction is
	 * available.  SEV guest memory is encrypted with a guest specific key
	 * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
	 * decode garbage.
	 *
	 * If KVM is NOT trying to simply skip an instruction, inject #UD if
	 * KVM reached this point without an instruction buffer.  In practice,
	 * this path should never be hit by a well-behaved guest, e.g. KVM
	 * doesn't intercept #UD or #GP for SEV guests, but this path is still
	 * theoretically reachable, e.g. via unaccelerated fault-like AVIC
	 * access, and needs to be handled by KVM to avoid putting the guest
	 * into an infinite loop.  Injecting #UD is somewhat arbitrary, but
	 * it's the least awful option given lack of insight into the guest.
	 *
	 * If KVM is trying to skip an instruction, simply resume the guest.
	 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
	 * will attempt to re-inject the INT3/INTO and skip the instruction.
	 * In that scenario, retrying the INT3/INTO and hoping the guest will
	 * make forward progress is the only option that has a chance of
	 * success (and in practice it will work the vast majority of the time).
	 */
	if (unlikely(!insn)) {
		if (emul_type & EMULTYPE_SKIP)
			return X86EMUL_UNHANDLEABLE;

		kvm_queue_exception(vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/*
	 * Emulate for SEV guests if the insn buffer is not empty.  The buffer
	 * will be empty if the DecodeAssist microcode cannot fetch bytes for
	 * the faulting instruction because the code fetch itself faulted, e.g.
	 * the guest attempted to fetch from emulated MMIO or a guest page
	 * table used to translate CS:RIP resides in emulated MMIO.
	 */
	if (likely(insn_len))
		return X86EMUL_CONTINUE;

	/*
	 * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
	 *
	 * Errata:
	 * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
	 * possible that CPU microcode implementing DecodeAssist will fail to
	 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
	 * be '0'.  This happens because microcode reads CS:RIP using a _data_
	 * load uop with CPL=0 privileges.  If the load hits a SMAP #PF, ucode
	 * gives up and does not fill the instruction bytes buffer.
	 *
	 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
	 * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
	 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
	 * GuestIntrBytes field of the VMCB.
	 *
	 * This does _not_ mean that the erratum has been encountered, as the
	 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
	 * #PF, e.g. if the guest attempted to execute from emulated MMIO and
	 * encountered a reserved/not-present #PF.
	 *
	 * To hit the erratum, the following conditions must be true:
	 * 1. CR4.SMAP=1 (obviously).
	 * 2. CR4.SMEP=0 || CPL=3.  If SMEP=1 and CPL<3, the erratum cannot
	 *    have been hit as the guest would have encountered a SMEP
	 *    violation #PF, not a #NPF.
	 * 3. The #NPF is not due to a code fetch, in which case failure to
	 *    retrieve the instruction bytes is legitimate (see above).
	 *
	 * In addition, don't apply the erratum workaround if the #NPF occurred
	 * while translating guest page tables (see below).
	 */
	error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
	if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
		goto resume_guest;

	smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
	smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
	is_user = svm_get_cpl(vcpu) == 3;
	if (smap && (!smep || is_user)) {
		pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");

		/*
		 * If the fault occurred in userspace, arbitrarily inject #GP
		 * to avoid killing the guest and to hopefully avoid confusing
		 * the guest kernel too much, e.g. injecting #PF would not be
		 * coherent with respect to the guest's page tables.  Request
		 * triple fault if the fault occurred in the kernel as there's
		 * no fault that KVM can inject without confusing the guest.
		 * In practice, the triple fault is moot as no sane SEV kernel
		 * will execute from user memory while also running with SMAP=1.
		 */
		if (is_user)
			kvm_inject_gp(vcpu, 0);
		else
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return X86EMUL_PROPAGATE_FAULT;
	}

resume_guest:
	/*
	 * If the erratum was not hit, simply resume the guest and let it fault
	 * again.  While awful, e.g. the vCPU may get stuck in an infinite loop
	 * if the fault is at CPL=0, it's the lesser of all evils.  Exiting to
	 * userspace will kill the guest, and letting the emulator read garbage
	 * will yield random behavior and potentially corrupt the guest.
	 *
	 * Simply resuming the guest is technically not a violation of the SEV
	 * architecture.  AMD's APM states that all code fetches and page table
	 * accesses for SEV guest are encrypted, regardless of the C-Bit.  The
	 * APM also states that encrypted accesses to MMIO are "ignored", but
	 * doesn't explicitly define "ignored", i.e. doing nothing and letting
	 * the guest spin is technically "ignoring" the access.
	 */
	return X86EMUL_RETRY_INSTR;
}

static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !gif_set(svm);
}

static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	if (!sev_es_guest(vcpu->kvm))
		return kvm_vcpu_deliver_sipi_vector(vcpu, vector);

	sev_vcpu_deliver_sipi_vector(vcpu, vector);
}

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

static int svm_vm_init(struct kvm *kvm)
{
	if (!pause_filter_count || !pause_filter_thresh)
		kvm->arch.pause_in_guest = true;

	if (enable_apicv) {
		int ret = avic_vm_init(kvm);
		if (ret)
			return ret;
	}

	return 0;
}

static struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = KBUILD_MODNAME,

	.check_processor_compatibility = svm_check_processor_compat,

	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_vcpu_create,
	.vcpu_free = svm_vcpu_free,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_switch_to_guest = svm_prepare_switch_to_guest,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = avic_vcpu_blocking,
	.vcpu_unblocking = avic_vcpu_unblocking,

	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.is_valid_cr0 = svm_is_valid_cr0,
	.set_cr0 = svm_set_cr0,
	.post_set_cr3 = sev_post_set_cr3,
	.is_valid_cr4 = svm_is_valid_cr4,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,
	.get_if_flag = svm_get_if_flag,

	.flush_tlb_all = svm_flush_tlb_all,
	.flush_tlb_current = svm_flush_tlb_current,
	.flush_tlb_gva = svm_flush_tlb_gva,
	.flush_tlb_guest = svm_flush_tlb_asid,

	.vcpu_pre_run = svm_vcpu_pre_run,
	.vcpu_run = svm_vcpu_run,
	.handle_exit = svm_handle_exit,
	.skip_emulated_instruction = svm_skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.inject_irq = svm_inject_irq,
	.inject_nmi = svm_inject_nmi,
	.is_vnmi_pending = svm_is_vnmi_pending,
	.set_vnmi_pending = svm_set_vnmi_pending,
	.inject_exception = svm_inject_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = svm_enable_nmi_window,
	.enable_irq_window = svm_enable_irq_window,
	.update_cr8_intercept = svm_update_cr8_intercept,
	.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
	.apicv_post_state_restore = avic_apicv_post_state_restore,
	.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,

	.get_exit_info = svm_get_exit_info,

	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
	.write_tsc_offset = svm_write_tsc_offset,
	.write_tsc_multiplier = svm_write_tsc_multiplier,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.nested_ops = &svm_nested_ops,

	.deliver_interrupt = svm_deliver_interrupt,
	.pi_update_irte = avic_pi_update_irte,
	.setup_mce = svm_setup_mce,

#ifdef CONFIG_KVM_SMM
	.smi_allowed = svm_smi_allowed,
	.enter_smm = svm_enter_smm,
	.leave_smm = svm_leave_smm,
	.enable_smi_window = svm_enable_smi_window,
#endif

	.mem_enc_ioctl = sev_mem_enc_ioctl,
	.mem_enc_register_region = sev_mem_enc_register_region,
	.mem_enc_unregister_region = sev_mem_enc_unregister_region,
	.guest_memory_reclaimed = sev_guest_memory_reclaimed,

	.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
	.vm_move_enc_context_from = sev_vm_move_enc_context_from,

	.check_emulate_instruction = svm_check_emulate_instruction,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

	.msr_filter_changed = svm_msr_filter_changed,
	.complete_emulated_msr = svm_complete_emulated_msr,

	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
};

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
	unsigned int enc_bit, mask_bit;
	u64 msr, mask;

	/* If there is no memory encryption support, use existing mask */
	if (cpuid_eax(0x80000000) < 0x8000001f)
		return;

	/* If memory encryption is not enabled, use existing mask */
	rdmsrl(MSR_AMD64_SYSCFG, msr);
	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
		return;

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	/* Increment the mask bit if it is the same as the encryption bit */
	if (enc_bit == mask_bit)
		mask_bit++;

	/*
	 * If the mask bit location is below 52, then some bits above the
	 * physical addressing limit will always be reserved, so use the
	 * rsvd_bits() function to generate the mask. This mask, along with
	 * the present bit, will be used to generate a page fault with
	 * PFER.RSV = 1.
	 *
	 * If the mask bit location is 52 (or above), then clear the mask.
	 */
	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}

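/*
 * Advertise SVM and feature CPUID bits based on what the hardware supports
 * and on the module parameters sanitized during hardware setup.
 */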
static __init void svm_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	kvm_caps.supported_perf_cap = 0;
	kvm_caps.supported_xss = 0;

	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
	if (nested) {
		kvm_cpu_cap_set(X86_FEATURE_SVM);
		kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);

		if (nrips)
			kvm_cpu_cap_set(X86_FEATURE_NRIPS);

		if (npt_enabled)
			kvm_cpu_cap_set(X86_FEATURE_NPT);

		if (tsc_scaling)
			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);

		if (vls)
			kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
		if (lbrv)
			kvm_cpu_cap_set(X86_FEATURE_LBRV);

		if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
			kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);

		if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
			kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);

		if (vgif)
			kvm_cpu_cap_set(X86_FEATURE_VGIF);

		if (vnmi)
			kvm_cpu_cap_set(X86_FEATURE_VNMI);

		/* Nested VM can receive #VMEXIT instead of triggering #GP */
		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
	}

	/* CPUID 0x80000008 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	if (enable_pmu) {
		/*
		 * Enumerate support for PERFCTR_CORE if and only if KVM has
		 * access to enough counters to virtualize "core" support,
		 * otherwise limit vPMU support to the legacy number of counters.
		 */
		if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
			kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
							  kvm_pmu_cap.num_counters_gp);
		else
			kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);

		if (kvm_pmu_cap.version != 2 ||
		    !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
			kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
	}

	/* CPUID 0x8000001F (SME/SEV features) */
	sev_set_cpu_caps();
}

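/*
 * One-time module setup: allocate the I/O permission bitmap, sanitize module
 * parameters against CPU feature bits, and disable the optional kvm_x86_ops
 * callbacks (AVIC, vNMI) when the corresponding features are unavailable.
 */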
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;
	unsigned int order = get_order(IOPM_SIZE);

	/*
	 * NX is required for shadow paging and for NPT if the NX huge pages
	 * mitigation is enabled.
	 */
	if (!boot_cpu_has(X86_FEATURE_NX)) {
		pr_err_ratelimited("NX (Execute Disable) not supported\n");
		return -EOPNOTSUPP;
	}
	kvm_enable_efer_bits(EFER_NX);

	iopm_pages = alloc_pages(GFP_KERNEL, order);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
				     XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (tsc_scaling) {
		if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
			tsc_scaling = false;
		} else {
			pr_info("TSC scaling supported\n");
			kvm_caps.has_tsc_control = true;
		}
	}
	kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
	kvm_caps.tsc_scaling_ratio_frac_bits = 32;

	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);

	if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
		kvm_enable_efer_bits(EFER_AUTOIBRS);

	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
		pause_filter_thresh = 0;
	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
		pause_filter_thresh = 0;
	}

	if (nested) {
		pr_info("Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	/*
	 * KVM's MMU doesn't support using 2-level paging for itself, and thus
	 * NPT isn't supported if the host is using 2-level paging since host
	 * CR4 is unchanged on VMRUN.
	 */
	if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
		npt_enabled = false;

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	/* Force VM NPT level equal to the host's paging level */
	kvm_configure_mmu(npt_enabled, get_npt_level(),
			  get_npt_level(), PG_LEVEL_1G);
	pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	/* Setup shadow_me_value and shadow_me_mask */
	kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);

	svm_adjust_mmio_mask();

	nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);

	/*
	 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
	 * may be modified by svm_adjust_mmio_mask()), as well as nrips.
	 */
	sev_hardware_setup();

	svm_hv_hardware_setup();

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	enable_apicv = avic = avic && avic_hardware_setup();

	if (!enable_apicv) {
		svm_x86_ops.vcpu_blocking = NULL;
		svm_x86_ops.vcpu_unblocking = NULL;
		svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
	} else if (!x2avic_enabled) {
		svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
		svm_gp_erratum_intercept = false;

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
	if (vnmi)
		pr_info("Virtual NMI enabled\n");

	if (!vnmi) {
		svm_x86_ops.is_vnmi_pending = NULL;
		svm_x86_ops.set_vnmi_pending = NULL;
	}

	if (lbrv) {
		if (!boot_cpu_has(X86_FEATURE_LBRV))
			lbrv = false;
		else
			pr_info("LBR virtualization supported\n");
	}

	if (!enable_pmu)
		pr_info("PMU virtualization is disabled\n");

	svm_set_cpu_caps();

	/*
	 * It seems that on AMD processors PTE's accessed bit is
	 * being set by the CPU hardware before the NPF vmexit.
	 * This is not expected behaviour and our tests fail because
	 * of it.
	 * A workaround here is to disable support for
	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
	 * In this case userspace can know if there is support using
	 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
	 * it.
	 * If future AMD CPU models change the behaviour described above,
	 * this variable can be changed accordingly
	 */
	allow_smaller_maxphyaddr = !npt_enabled;

	return 0;

err:
	svm_hardware_unsetup();
	return r;
}

static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.hardware_setup = svm_hardware_setup,

	.runtime_ops = &svm_x86_ops,
	.pmu_ops = &amd_pmu_ops,
};

static void __svm_exit(void)
{
	kvm_x86_vendor_exit();

	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
}

static int __init svm_init(void)
{
	int r;

	__unused_size_checks();

	if (!kvm_is_svm_supported())
		return -EOPNOTSUPP;

	r = kvm_x86_vendor_init(&svm_init_ops);
	if (r)
		return r;

	cpu_emergency_register_virt_callback(svm_emergency_disable);

	/*
	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
	 * exposed to userspace!
	 */
	r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
		     THIS_MODULE);
	if (r)
		goto err_kvm_init;

	return 0;

err_kvm_init:
	__svm_exit();
	return r;
}

static void __exit svm_exit(void)
{
	kvm_exit();
	__svm_exit();
}

module_init(svm_init)
module_exit(svm_exit)