#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>

#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/reboot.h>
#include <asm/fpu/api.h>

#include <trace/events/ipi.h>

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define SEG_TYPE_LDT		2
#define SEG_TYPE_BUSY_TSS16	3

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);

#define X2APIC_MSR(x)	(APIC_BASE_MSR + (x >> 4))
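/*
 * Illustrative note (not part of the original source): APIC_BASE_MSR is 0x800
 * and each x2APIC MSR corresponds to a 16-byte xAPIC MMIO register, hence the
 * ">> 4". For example, APIC_TASKPRI (offset 0x80) maps to MSR 0x800 + 0x8 =
 * 0x808, the x2APIC TPR MSR.
 */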
static const struct svm_direct_access_msrs {
	u32 index;	/* Index of the MSR */
	bool always;	/* True if intercept is initially cleared */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
	{ .index = MSR_IA32_SYSENTER_EIP,		.always = false },
	{ .index = MSR_IA32_SYSENTER_ESP,		.always = false },
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_FLUSH_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_EFER,				.always = false },
	{ .index = MSR_IA32_CR_PAT,			.always = false },
	{ .index = MSR_AMD64_SEV_ES_GHCB,		.always = true  },
	{ .index = MSR_TSC_AUX,				.always = false },
	{ .index = X2APIC_MSR(APIC_ID),			.always = false },
	{ .index = X2APIC_MSR(APIC_LVR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TASKPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_ARBPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_PROCPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_EOI),		.always = false },
	{ .index = X2APIC_MSR(APIC_RRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LDR),		.always = false },
	{ .index = X2APIC_MSR(APIC_DFR),		.always = false },
	{ .index = X2APIC_MSR(APIC_SPIV),		.always = false },
	{ .index = X2APIC_MSR(APIC_ISR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_IRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ESR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR2),		.always = false },
	/*
	 * AMD does not virtualize APIC TSC-deadline timer mode, but it is
	 * emulated by KVM. When setting APIC LVTT (0x832) register bit 18,
	 * the AVIC hardware would generate GP fault. Therefore, always
	 * intercept the MSR 0x832, and do not setup direct_access_msr.
	 */
	{ .index = X2APIC_MSR(APIC_LVTTHMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTPC),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT0),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT1),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTERR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};
/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop. In this mode, a 16-bit pause filter threshold field is
 *	added to the VMCB. The threshold value is a cycle count that is used
 *	to reset the pause counter. As with simple pause filtering, VMRUN
 *	loads the pause count value from the VMCB into an internal counter.
 *	Then, on each pause instruction the hardware checks the elapsed number
 *	of cycles since the most recent pause instruction against the pause
 *	filter threshold. If the elapsed cycle count is greater than the pause
 *	filter threshold, then the internal pause count is reloaded from the
 *	VMCB and execution continues. If the elapsed cycle count is less than
 *	the pause filter threshold, then the internal pause count is
 *	decremented. If the count value is less than zero and PAUSE intercept
 *	is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *	supported and the pause filter threshold field is set to zero, the
 *	filter will operate in the simpler, count-only mode.
 */
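/*
 * Worked example (illustrative, not part of the original source): with, say,
 * a pause filter count of 3000 and a pause filter threshold of 128, VMRUN
 * loads 3000 into the internal counter. A tight spin loop whose PAUSEs land
 * fewer than 128 cycles apart decrements the counter on every PAUSE and
 * triggers a #VMEXIT after roughly 3000 iterations, while PAUSEs spaced more
 * than 128 cycles apart keep reloading the counter and never cause an exit.
 */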
static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, 0444);

/* enable/disable Next RIP Save */
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
static int lbrv = true;
module_param(lbrv, int, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

/*
 * enable / disable AVIC.  Because the defaults differ for APICv
 * support between VMX and SVM we cannot use module_param_named.
 */
module_param(avic, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);

bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);

module_param(vnmi, bool, 0444);

static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

DEFINE_PER_CPU(struct svm_cpu_data, svm_data);

/*
 * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
static int tsc_aux_uret_slot __read_mostly = -1;

static const u32 msrpm_ranges[] = { 0, 0xc0000000, 0xc0010000 };

#define NUM_MSR_MAPS	ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE	2048
#define MSRS_IN_RANGE	(MSRS_RANGE_SIZE * 8 / 2)
u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}
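/*
 * Worked example (illustrative, not part of the original source): for an MSR
 * in the second range, e.g. 0xc0000081 (MSR_STAR), i == 1, so the byte offset
 * is (0x81 / 4) + 1 * MSRS_RANGE_SIZE = 32 + 2048 = 2080, and the returned
 * u32 offset is 2080 / 4 = 520. Each MSR consumes two permission bits (read
 * and write), which is why four MSRs share one byte of the MSRPM.
 */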
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 old_efer = vcpu->arch.efer;
	vcpu->arch.efer = efer;

	/* Shadow paging assumes NX to be available.  */

	if (!(efer & EFER_LMA))

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {
			svm_leave_nested(vcpu);
			svm_set_gif(svm, true);
			/* #GP intercept is still needed for vmware backdoor */
			if (!enable_vmware_backdoor)
				clr_exception_intercept(svm, GP_VECTOR);

			/*
			 * Free the nested guest state, unless we are in SMM.
			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
			svm_free_nested(svm);

			int ret = svm_allocate_nested(svm);

				vcpu->arch.efer = old_efer;

			/*
			 * Never intercept #GP for SEV guests, KVM can't
			 * decrypt guest memory to workaround the erratum.
			 */
			if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
				set_exception_intercept(svm, GP_VECTOR);

	svm->vmcb->save.efer = efer | EFER_SVME;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
					   bool commit_side_effects)
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long old_rflags;

	/*
	 * SEV-ES does not expose the next RIP. The RIP update is controlled by
	 * the type of exit and the #VC handler in the guest.
	 */
	if (sev_es_guest(vcpu->kvm))

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (unlikely(!commit_side_effects))
			old_rflags = svm->vmcb->save.rflags;

		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))

		if (unlikely(!commit_side_effects))
			svm->vmcb->save.rflags = old_rflags;

		kvm_rip_write(vcpu, svm->next_rip);

	if (likely(commit_side_effects))
		svm_set_interrupt_shadow(vcpu, 0);

static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
	return __svm_skip_emulated_instruction(vcpu, true);

static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
	unsigned long rip, old_rip = kvm_rip_read(vcpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Due to architectural shortcomings, the CPU doesn't always provide
	 * NextRIP, e.g. if KVM intercepted an exception that occurred while
	 * the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip
	 * the instruction even if NextRIP is supported to acquire the next
	 * RIP so that it can be shoved into the NextRIP field, otherwise
	 * hardware will fail to advance guest RIP during event injection.
	 * Drop the exception/interrupt if emulation fails and effectively
	 * retry the instruction, it's the least awful option. If NRIPS is
	 * in use, the skip must not commit any side effects such as clearing
	 * the interrupt shadow or RFLAGS.RF.
	 */
	if (!__svm_skip_emulated_instruction(vcpu, !nrips))

	rip = kvm_rip_read(vcpu);

	/*
	 * Save the injection information, even when using next_rip, as the
	 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
	 * doesn't complete due to a VM-Exit occurring while the CPU is
	 * vectoring the event. Decoding the instruction isn't guaranteed to
	 * work as there may be no backing instruction, e.g. if the event is
	 * being injected by L1 for L2, or if the guest is patching INT3 into
	 * a different instruction.
	 */
	svm->soft_int_injected = true;
	svm->soft_int_csbase = svm->vmcb->save.cs.base;
	svm->soft_int_old_rip = old_rip;
	svm->soft_int_next_rip = rip;

	kvm_rip_write(vcpu, old_rip);

	if (static_cpu_has(X86_FEATURE_NRIPS))
		svm->vmcb->control.next_rip = rip;

static void svm_inject_exception(struct kvm_vcpu *vcpu)
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct vcpu_svm *svm = to_svm(vcpu);

	kvm_deliver_exception_payload(vcpu, ex);

	if (kvm_exception_is_soft(ex->vector) &&
	    svm_update_soft_interrupt_rip(vcpu))

	svm->vmcb->control.event_inj = ex->vector
				       | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
				       | SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = ex->error_code;
static void svm_init_erratum_383(void)
	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;

static void svm_init_osvw(struct kvm_vcpu *vcpu)
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;

static bool __kvm_is_svm_supported(void)
	int cpu = smp_processor_id();
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor != X86_VENDOR_AMD &&
	    c->x86_vendor != X86_VENDOR_HYGON) {
		pr_err("CPU %d isn't AMD or Hygon\n", cpu);

	if (!cpu_has(c, X86_FEATURE_SVM)) {
		pr_err("SVM not supported by CPU %d\n", cpu);

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		pr_info("KVM is unsupported when running as an SEV guest\n");

static bool kvm_is_svm_supported(void)
	supported = __kvm_is_svm_supported();

static int svm_check_processor_compat(void)
	if (!__kvm_is_svm_supported())

static void __svm_write_tsc_multiplier(u64 multiplier)
	if (multiplier == __this_cpu_read(current_tsc_ratio))

	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
	__this_cpu_write(current_tsc_ratio, multiplier);

static inline void kvm_cpu_svm_disable(void)
	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME) {
		/*
		 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
		 * NMI aren't blocked.
		 */
		wrmsrl(MSR_EFER, efer & ~EFER_SVME);

static void svm_emergency_disable(void)
	kvm_rebooting = true;

	kvm_cpu_svm_disable();

static void svm_hardware_disable(void)
	/* Make sure we clean up behind us */
	__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);

	kvm_cpu_svm_disable();

	amd_pmu_disable_virt();

static int svm_hardware_enable(void)
	struct svm_cpu_data *sd;

	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)

	sd = per_cpu_ptr(&svm_data, me);
	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;
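	/*
	 * Illustrative note (not part of the original source): ASIDs in
	 * [1, max_sev_asid] are reserved for SEV guests, so this CPU hands
	 * out ASIDs in [min_asid, max_asid] to non-SEV guests via new_asid().
	 */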
	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		/*
		 * Set the default value, even if we don't use TSC scaling
		 * to avoid having stale value in the msr
		 */
		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);

	/*
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,

			osvw_status = osvw_len = 0;

		osvw_status |= status;
		osvw_status &= (1ULL << osvw_len) - 1;

		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	/*
	 * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
	 * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
	 * Since Linux does not change the value of TSC_AUX once set, prime the
	 * TSC_AUX field now to avoid a RDMSR on every vCPU run.
	 */
	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
		struct sev_es_save_area *hostsa;
		u32 __maybe_unused msr_hi;

		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);

		rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);

static void svm_cpu_uninit(int cpu)
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);

	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	sd->save_area_pa = 0;
	sd->save_area = NULL;

static int svm_cpu_init(int cpu)
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);

	memset(sd, 0, sizeof(struct svm_cpu_data));
	sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);

	ret = sev_cpu_init(sd);

	sd->save_area_pa = __sme_page_pa(sd->save_area);

	__free_page(sd->save_area);
	sd->save_area = NULL;
static void set_dr_intercepts(struct vcpu_svm *svm)
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);

static void clr_dr_intercepts(struct vcpu_svm *svm)
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	recalc_intercepts(svm);

static int direct_access_msr_slot(u32 msr)
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == msr)

static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
	struct vcpu_svm *svm = to_svm(vcpu);
	int slot = direct_access_msr_slot(msr);

	/* Set the shadow bitmaps to the desired intercept states */
		set_bit(slot, svm->shadow_msr_intercept.read);
		clear_bit(slot, svm->shadow_msr_intercept.read);
		set_bit(slot, svm->shadow_msr_intercept.write);
		clear_bit(slot, svm->shadow_msr_intercept.write);

static bool valid_msr_intercept(u32 index)
	return direct_access_msr_slot(index) != -ENOENT;

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
	/*
	 * For non-nested case:
	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
	 *
	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
	 */
	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;

	BUG_ON(offset == MSR_INVALID);

	return test_bit(bit_write, &tmp);
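/*
 * Worked example (illustrative, not part of the original source): each MSR
 * owns two adjacent bits in the MSRPM (even bit = read intercept, odd bit =
 * write intercept), so one u32 covers 16 MSRs. For MSR 0xc0000081 the low
 * nibble is 1, giving bit_read = 2 and bit_write = 3 within the u32 located
 * at svm_msrpm_offset(msr).
 */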
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
					u32 msr, int read, int write)
	struct vcpu_svm *svm = to_svm(vcpu);
	u8 bit_read, bit_write;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	/* Enforce non allowed MSRs to trap */
	if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))

	if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
	svm->nested.force_msr_bitmap_recalc = true;

void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
	set_shadow_msr_intercept(vcpu, msr, read, write);
	set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);

u32 *svm_vcpu_alloc_msrpm(void)
	unsigned int order = get_order(MSRPM_SIZE);
	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);

	msrpm = page_address(pages);
	memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
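	/*
	 * Illustrative note (not part of the original source): filling the
	 * MSRPM with 0xff sets both the read and write intercept bit for
	 * every MSR, i.e. everything is intercepted by default;
	 * svm_vcpu_init_msrpm() below then clears the bits for the
	 * direct_access_msrs entries marked .always = true.
	 */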
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)

		set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);

void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
	if (intercept == svm->x2avic_msrs_intercepted)

	for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
		int index = direct_access_msrs[i].index;

		if ((index < APIC_BASE_MSR) ||
		    (index > APIC_BASE_MSR + 0xff))

		set_msr_interception(&svm->vcpu, svm->msrpm, index,
				     !intercept, !intercept);

	svm->x2avic_msrs_intercepted = intercept;

void svm_vcpu_free_msrpm(u32 *msrpm)
	__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Set intercept permissions for all direct access MSRs again. They
	 * will automatically get filtered through the MSR filter, so we are
	 * back in sync after this.
	 */
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 msr = direct_access_msrs[i].index;
		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
		u32 write = test_bit(i, svm->shadow_msr_intercept.write);

		set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);

static void add_msr_offset(u32 offset)
	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)

		/* Add offset to list */
		msrpm_offsets[i] = offset;

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */

static void init_msrpm_offsets(void)
	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);

void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
	to_vmcb->save.dbgctl = from_vmcb->save.dbgctl;
	to_vmcb->save.br_from = from_vmcb->save.br_from;
	to_vmcb->save.br_to = from_vmcb->save.br_to;
	to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from;
	to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to;

	vmcb_mark_dirty(to_vmcb, VMCB_LBR);

static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);

	/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
	if (is_guest_mode(vcpu))
		svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);

static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);

	/*
	 * Move the LBR msrs back to the vmcb01 to avoid copying them
	 * on nested guest entries.
	 */
	if (is_guest_mode(vcpu))
		svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);

static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
	/*
	 * If LBR virtualization is disabled, the LBR MSRs are always kept in
	 * vmcb01. If LBR virtualization is enabled and L1 is running VMs of
	 * its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
	 */
	return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :

void svm_update_lbrv(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
	bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
			   (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
			    (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));

	if (enable_lbrv == current_enable_lbrv)

		svm_enable_lbrv(vcpu);
		svm_disable_lbrv(vcpu);

void disable_nmi_singlestep(struct vcpu_svm *svm)
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;

static void grow_ple_window(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (kvm_pause_in_guest(vcpu->kvm))

	control->pause_filter_count = __grow_ple_window(old,
							pause_filter_count_grow,
							pause_filter_count_max);

	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);

static void shrink_ple_window(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (kvm_pause_in_guest(vcpu->kvm))

	control->pause_filter_count =
			__shrink_ple_window(old,
					    pause_filter_count_shrink,
					    pause_filter_count);
	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
static void svm_hardware_unsetup(void)
	sev_hardware_unsetup();

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
		     get_order(IOPM_SIZE));

static void init_seg(struct vmcb_seg *seg)
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;

static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.tsc_offset;

static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->tsc_ratio_msr;

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
	if (to_svm(vcpu)->guest_state_loaded)
		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);

/* Evaluate instruction intercepts that depend on guest CPUID features. */
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
					      struct vcpu_svm *svm)
	/*
	 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
	 * roots, or if INVPCID is disabled in the guest to inject #UD.
	 */
	if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
		    !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
			svm_set_intercept(svm, INTERCEPT_INVPCID);
			svm_clr_intercept(svm, INTERCEPT_INVPCID);

	if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
		if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
			svm_set_intercept(svm, INTERCEPT_RDTSCP);

static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	if (guest_cpuid_is_intel(vcpu)) {
		/*
		 * We must intercept SYSENTER_EIP and SYSENTER_ESP
		 * accesses because the processor only stores 32 bits.
		 * For the same reason we cannot use virtual VMLOAD/VMSAVE.
		 */
		svm_set_intercept(svm, INTERCEPT_VMLOAD);
		svm_set_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);

		/*
		 * If hardware supports Virtual VMLOAD VMSAVE then enable it
		 * in VMCB and clear intercepts to avoid #VMEXIT.
		 */
			svm_clr_intercept(svm, INTERCEPT_VMLOAD);
			svm_clr_intercept(svm, INTERCEPT_VMSAVE);
			svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

		/* No need to intercept these MSRs */
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);

static void init_vmcb(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb01.ptr;
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	svm_set_intercept(svm, INTERCEPT_CR0_READ);
	svm_set_intercept(svm, INTERCEPT_CR3_READ);
	svm_set_intercept(svm, INTERCEPT_CR4_READ);
	svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
	svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
	svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(vcpu))
		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 */
	if (enable_vmware_backdoor)
		set_exception_intercept(svm, GP_VECTOR);

	svm_set_intercept(svm, INTERCEPT_INTR);
	svm_set_intercept(svm, INTERCEPT_NMI);

		svm_set_intercept(svm, INTERCEPT_SMI);

	svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	svm_set_intercept(svm, INTERCEPT_RDPMC);
	svm_set_intercept(svm, INTERCEPT_CPUID);
	svm_set_intercept(svm, INTERCEPT_INVD);
	svm_set_intercept(svm, INTERCEPT_INVLPG);
	svm_set_intercept(svm, INTERCEPT_INVLPGA);
	svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
	svm_set_intercept(svm, INTERCEPT_MSR_PROT);
	svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
	svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
	svm_set_intercept(svm, INTERCEPT_VMRUN);
	svm_set_intercept(svm, INTERCEPT_VMMCALL);
	svm_set_intercept(svm, INTERCEPT_VMLOAD);
	svm_set_intercept(svm, INTERCEPT_VMSAVE);
	svm_set_intercept(svm, INTERCEPT_STGI);
	svm_set_intercept(svm, INTERCEPT_CLGI);
	svm_set_intercept(svm, INTERCEPT_SKINIT);
	svm_set_intercept(svm, INTERCEPT_WBINVD);
	svm_set_intercept(svm, INTERCEPT_XSETBV);
	svm_set_intercept(svm, INTERCEPT_RDPRU);
	svm_set_intercept(svm, INTERCEPT_RSM);

	if (!kvm_mwait_in_guest(vcpu->kvm)) {
		svm_set_intercept(svm, INTERCEPT_MONITOR);
		svm_set_intercept(svm, INTERCEPT_MWAIT);

	if (!kvm_hlt_in_guest(vcpu->kvm))
		svm_set_intercept(svm, INTERCEPT_HLT);

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.base = 0;
	save->gdtr.limit = 0xffff;
	save->idtr.base = 0;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	/* Setup VMCB for Nested Paging */
	control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
	svm_clr_intercept(svm, INTERCEPT_INVLPG);
	clr_exception_intercept(svm, PF_VECTOR);
	svm_clr_intercept(svm, INTERCEPT_CR3_READ);
	svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
	save->g_pat = vcpu->arch.pat;

	svm->current_vmcb->asid_generation = 0;

	svm->nested.vmcb12_gpa = INVALID_GPA;
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	if (!kvm_pause_in_guest(vcpu->kvm)) {
		control->pause_filter_count = pause_filter_count;
		if (pause_filter_thresh)
			control->pause_filter_thresh = pause_filter_thresh;
		svm_set_intercept(svm, INTERCEPT_PAUSE);
		svm_clr_intercept(svm, INTERCEPT_PAUSE);

	svm_recalc_instruction_intercepts(vcpu, svm);

	/*
	 * If the host supports V_SPEC_CTRL then disable the interception
	 * of MSR_IA32_SPEC_CTRL.
	 */
	if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);

	if (kvm_vcpu_apicv_active(vcpu))
		avic_init_vmcb(svm, vmcb);

		svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;

		svm_clr_intercept(svm, INTERCEPT_STGI);
		svm_clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;

	if (sev_guest(vcpu->kvm))

	svm_hv_init_vmcb(vmcb);
	init_vmcb_after_set_cpuid(vcpu);

	vmcb_mark_all_dirty(vmcb);
static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_vcpu_init_msrpm(vcpu, svm->msrpm);

	svm_init_osvw(vcpu);
	vcpu->arch.microcode_version = 0x01000065;
	svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;

	svm->nmi_masked = false;
	svm->awaiting_iret_completion = false;

	if (sev_es_guest(vcpu->kvm))
		sev_es_vcpu_reset(svm);

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->virt_spec_ctrl = 0;

		__svm_vcpu_reset(vcpu);

void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
	svm->current_vmcb = target_vmcb;
	svm->vmcb = target_vmcb->ptr;

static int svm_vcpu_create(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm;
	struct page *vmcb01_page;
	struct page *vmsa_page = NULL;

	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);

	vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	if (sev_es_guest(vcpu->kvm)) {
		/*
		 * SEV-ES guests require a separate VMSA page used to contain
		 * the encrypted register state of the guest.
		 */
		vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
			goto error_free_vmcb_page;

		/*
		 * SEV-ES guests maintain an encrypted version of their FPU
		 * state which is restored and saved on VMRUN and VMEXIT.
		 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
		 * do xsave/xrstor on it.
		 */
		fpstate_set_confidential(&vcpu->arch.guest_fpu);

	err = avic_init_vcpu(svm);
		goto error_free_vmsa_page;

	svm->msrpm = svm_vcpu_alloc_msrpm();
		goto error_free_vmsa_page;

	svm->x2avic_msrs_intercepted = true;

	svm->vmcb01.ptr = page_address(vmcb01_page);
	svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
	svm_switch_vmcb(svm, &svm->vmcb01);

		svm->sev_es.vmsa = page_address(vmsa_page);

	svm->guest_state_loaded = false;

error_free_vmsa_page:
	__free_page(vmsa_page);
error_free_vmcb_page:
	__free_page(vmcb01_page);

static void svm_clear_current_vmcb(struct vmcb *vmcb)
	for_each_online_cpu(i)
		cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);

static void svm_vcpu_free(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * The vmcb page can be recycled, causing a false negative in
	 * svm_vcpu_load(). So, ensure that no logical CPU has this
	 * vmcb page recorded as its current vmcb.
	 */
	svm_clear_current_vmcb(svm->vmcb);

	svm_leave_nested(vcpu);
	svm_free_nested(svm);

	sev_free_vcpu(vcpu);

	__free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));

static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);

	if (sev_es_guest(vcpu->kvm))
		sev_es_unmap_ghcb(svm);

	if (svm->guest_state_loaded)

	/*
	 * Save additional host state that will be restored on VMEXIT (sev-es)
	 * or subsequent vmload of host save area.
	 */
	vmsave(sd->save_area_pa);
	if (sev_es_guest(vcpu->kvm)) {
		struct sev_es_save_area *hostsa;
		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);

		sev_es_prepare_switch_to_guest(hostsa);

		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);

	/*
	 * TSC_AUX is always virtualized for SEV-ES guests when the feature is
	 * available. The user return MSR support is not required in this case
	 * because TSC_AUX is restored on #VMEXIT from the host save area
	 * (which has been initialized in svm_hardware_enable()).
	 */
	if (likely(tsc_aux_uret_slot >= 0) &&
	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);

	svm->guest_state_loaded = true;
static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
	to_svm(vcpu)->guest_state_loaded = false;

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);

	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;

		if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
			indirect_branch_prediction_barrier();

	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_load(vcpu, cpu);

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
	if (kvm_vcpu_apicv_active(vcpu))
		avic_vcpu_put(vcpu);

	svm_prepare_host_switch(vcpu);

	++vcpu->stat.host_state_reload;

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long rflags = svm->vmcb->save.rflags;

	if (svm->nmi_singlestep) {
		/* Hide our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			rflags &= ~X86_EFLAGS_RF;

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	if (to_svm(vcpu)->nmi_singlestep)
		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;

static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
	struct vmcb *vmcb = to_svm(vcpu)->vmcb;

	return sev_es_guest(vcpu->kvm)
		? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
		: kvm_get_rflags(vcpu) & X86_EFLAGS_IF;

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
	kvm_register_mark_available(vcpu, reg);

	case VCPU_EXREG_PDPTR:
		/*
		 * When !npt_enabled, mmu->pdptrs[] is already available since
		 * it is always updated per SDM when moving to CRs.
		 */
			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
		KVM_BUG_ON(1, vcpu->kvm);

static void svm_set_vintr(struct vcpu_svm *svm)
	struct vmcb_control_area *control;

	/*
	 * The following fields are ignored when AVIC is enabled
	 */
	WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));

	svm_set_intercept(svm, INTERCEPT_VINTR);

	/*
	 * Recalculating intercepts may have cleared the VINTR intercept. If
	 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
	 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
	 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
	 * interrupts will never be unblocked while L2 is running.
	 */
	if (!svm_is_intercept(svm, INTERCEPT_VINTR))

	/*
	 * This is just a dummy VINTR to actually cause a vmexit to happen.
	 * Actual injection of virtual interrupts happens through EVENTINJ.
	 */
	control = &svm->vmcb->control;
	control->int_vector = 0x0;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);

static void svm_clear_vintr(struct vcpu_svm *svm)
	svm_clr_intercept(svm, INTERCEPT_VINTR);

	/* Drop int_ctl fields related to VINTR injection. */
	svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
	if (is_guest_mode(&svm->vcpu)) {
		svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;

		WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
			(svm->nested.ctl.int_ctl & V_TPR_MASK));

		svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
			V_IRQ_INJECTION_BITS_MASK;

		svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
	struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;

	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save01->fs;
	case VCPU_SREG_GS: return &save01->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save01->tr;
	case VCPU_SREG_LDTR: return &save01->ldtr;

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
	struct vmcb_seg *s = svm_seg(vcpu, seg);

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;
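	/*
	 * Illustrative note (not part of the original source): the VMCB holds
	 * the expanded 32-bit limit, so a value above 0xfffff can only result
	 * from page granularity (G=1), while any smaller limit is expressible
	 * byte-granular, making G=0 a legal choice either way.
	 */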
	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present;

		/*
		 * Work around a bug where the busy flag in the tr selector
		 */

		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */

		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */

		/* This is symmetric with svm_set_segment() */
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;

static int svm_get_cpl(struct kvm_vcpu *vcpu)
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
	struct kvm_segment cs;

	svm_get_segment(vcpu, &cs, VCPU_SREG_CS);

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	vmcb_mark_dirty(svm->vmcb, VMCB_DT);

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	vmcb_mark_dirty(svm->vmcb, VMCB_DT);

static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * For guests that don't set guest_state_protected, the cr3 update is
	 * handled via kvm_mmu_load() while entering the guest. For guests
	 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
	 * VMCB save area now, since the save area will become the initial
	 * contents of the VMSA, and future VMCB save area updates won't be
	 */
	if (sev_es_guest(vcpu->kvm)) {
		svm->vmcb->save.cr3 = cr3;
		vmcb_mark_dirty(svm->vmcb, VMCB_CR);

static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)

void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
	struct vcpu_svm *svm = to_svm(vcpu);
	bool old_paging = is_paging(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			if (!vcpu->arch.guest_state_protected)
				svm->vmcb->save.efer |= EFER_LMA | EFER_LME;

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			if (!vcpu->arch.guest_state_protected)
				svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);

	vcpu->arch.cr0 = cr0;

		hcr0 |= X86_CR0_PG | X86_CR0_WP;
	if (old_paging != is_paging(vcpu))
		svm_set_cr4(vcpu, kvm_read_cr4(vcpu));

	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);

	svm->vmcb->save.cr0 = hcr0;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);

	/*
	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
	if (sev_es_guest(vcpu->kvm))

		/* Selective CR0 write remains on. */
		svm_clr_intercept(svm, INTERCEPT_CR0_READ);
		svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
		svm_set_intercept(svm, INTERCEPT_CR0_READ);
		svm_set_intercept(svm, INTERCEPT_CR0_WRITE);

static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)

void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = vcpu->arch.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb_current(vcpu);

	vcpu->arch.cr4 = cr4;

	if (!is_paging(vcpu))
		cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);

	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid_runtime(vcpu);

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

	/*
	 * This is always accurate, except if SYSRET returned to a segment
	 * with SS.DPL != 3. Intel does not have this quirk, and always
	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
	 * would entail passing the CPL to userspace and back.
	 */
	if (seg == VCPU_SREG_SS)
		/* This is symmetric with svm_get_segment() */
		svm->vmcb->save.cpl = (var->dpl & 3);

	vmcb_mark_dirty(svm->vmcb, VMCB_SEG);

static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, BP_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = sd->min_asid;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);

	svm->current_vmcb->asid_generation = sd->asid_generation;
	svm->asid = sd->next_asid++;
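	/*
	 * Illustrative note (not part of the original source): when the
	 * per-CPU ASID space is exhausted, bumping asid_generation forces
	 * every vCPU that next runs on this CPU to take a fresh ASID (its
	 * cached generation no longer matches), and TLB_CONTROL_FLUSH_ALL_ASID
	 * flushes any stale translations left behind by recycled ASIDs.
	 */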
1992 static void svm_set_dr6(struct vcpu_svm
*svm
, unsigned long value
)
1994 struct vmcb
*vmcb
= svm
->vmcb
;
1996 if (svm
->vcpu
.arch
.guest_state_protected
)
1999 if (unlikely(value
!= vmcb
->save
.dr6
)) {
2000 vmcb
->save
.dr6
= value
;
2001 vmcb_mark_dirty(vmcb
, VMCB_DR
);
2005 static void svm_sync_dirty_debug_regs(struct kvm_vcpu
*vcpu
)
2007 struct vcpu_svm
*svm
= to_svm(vcpu
);
2009 if (WARN_ON_ONCE(sev_es_guest(vcpu
->kvm
)))
2012 get_debugreg(vcpu
->arch
.db
[0], 0);
2013 get_debugreg(vcpu
->arch
.db
[1], 1);
2014 get_debugreg(vcpu
->arch
.db
[2], 2);
2015 get_debugreg(vcpu
->arch
.db
[3], 3);
2017 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
2018 * because db_interception might need it. We can do it before vmentry.
2020 vcpu
->arch
.dr6
= svm
->vmcb
->save
.dr6
;
2021 vcpu
->arch
.dr7
= svm
->vmcb
->save
.dr7
;
2022 vcpu
->arch
.switch_db_regs
&= ~KVM_DEBUGREG_WONT_EXIT
;
2023 set_dr_intercepts(svm
);
2026 static void svm_set_dr7(struct kvm_vcpu
*vcpu
, unsigned long value
)
2028 struct vcpu_svm
*svm
= to_svm(vcpu
);
2030 if (vcpu
->arch
.guest_state_protected
)
2033 svm
->vmcb
->save
.dr7
= value
;
2034 vmcb_mark_dirty(svm
->vmcb
, VMCB_DR
);
2037 static int pf_interception(struct kvm_vcpu
*vcpu
)
2039 struct vcpu_svm
*svm
= to_svm(vcpu
);
2041 u64 fault_address
= svm
->vmcb
->control
.exit_info_2
;
2042 u64 error_code
= svm
->vmcb
->control
.exit_info_1
;
2044 return kvm_handle_page_fault(vcpu
, error_code
, fault_address
,
2045 static_cpu_has(X86_FEATURE_DECODEASSISTS
) ?
2046 svm
->vmcb
->control
.insn_bytes
: NULL
,
2047 svm
->vmcb
->control
.insn_len
);
2050 static int npf_interception(struct kvm_vcpu
*vcpu
)
2052 struct vcpu_svm
*svm
= to_svm(vcpu
);
2054 u64 fault_address
= svm
->vmcb
->control
.exit_info_2
;
2055 u64 error_code
= svm
->vmcb
->control
.exit_info_1
;
2057 trace_kvm_page_fault(vcpu
, fault_address
, error_code
);
2058 return kvm_mmu_page_fault(vcpu
, fault_address
, error_code
,
2059 static_cpu_has(X86_FEATURE_DECODEASSISTS
) ?
2060 svm
->vmcb
->control
.insn_bytes
: NULL
,
2061 svm
->vmcb
->control
.insn_len
);
static int db_interception(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(vcpu->guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
	    !svm->nmi_singlestep) {
		u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
		kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
		return 1;
	}

	if (svm->nmi_singlestep) {
		disable_nmi_singlestep(svm);
		/* Make sure we check for pending NMIs upon entry */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	if (vcpu->guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct kvm_vcpu *vcpu)
{
	return handle_ud(vcpu);
}

static int ac_interception(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
	return 1;
}
static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low    = lower_32_bits(value);
		high   = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

static void svm_handle_mce(struct kvm_vcpu *vcpu)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	kvm_machine_check();
}

static int mc_interception(struct kvm_vcpu *vcpu)
{
	return 1;
}
static int shutdown_interception(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * VMCB is undefined after a SHUTDOWN intercept.  INIT the vCPU to put
	 * the VMCB in a known good state.  Unfortunately, KVM doesn't have
	 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
	 * userspace.  At a platform view, INIT is acceptable behavior as
	 * there exist bare metal platforms that automatically INIT the CPU
	 * in response to shutdown.
	 *
	 * The VM save area for SEV-ES guests has already been encrypted so it
	 * cannot be reinitialized, i.e. synthesizing INIT is futile.
	 */
	if (!sev_es_guest(vcpu->kvm)) {
		clear_page(svm->vmcb);
		kvm_vcpu_reset(vcpu, true);
	}

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
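
/*
 * IOIO intercept: exit_info_1 encodes the port, operand size, direction
 * and string-ness of the access; exit_info_2 holds the RIP of the next
 * instruction, which is used on the fast (non-string) PIO path below.
 */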
static int io_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++vcpu->stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

	if (string) {
		if (sev_es_guest(vcpu->kvm))
			return sev_es_string_io(svm, size, port, in);
		else
			return kvm_emulate_instruction(vcpu, 0);
	}

	svm->next_rip = svm->vmcb->control.exit_info_2;

	return kvm_fast_pio(vcpu, size, port, in);
}

static int nmi_interception(struct kvm_vcpu *vcpu)
{
	return 1;
}

static int smi_interception(struct kvm_vcpu *vcpu)
{
	return 1;
}

static int intr_interception(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.irq_exits;
	return 1;
}
static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	int ret;

	if (nested_svm_check_permissions(vcpu))
		return 1;

	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
	if (ret) {
		if (ret == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	ret = kvm_skip_emulated_instruction(vcpu);

	if (vmload) {
		svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
		svm->sysenter_eip_hi = 0;
		svm->sysenter_esp_hi = 0;
	} else {
		svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
	}

	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

static int vmload_interception(struct kvm_vcpu *vcpu)
{
	return vmload_vmsave_interception(vcpu, true);
}

static int vmsave_interception(struct kvm_vcpu *vcpu)
{
	return vmload_vmsave_interception(vcpu, false);
}

static int vmrun_interception(struct kvm_vcpu *vcpu)
{
	if (nested_svm_check_permissions(vcpu))
		return 1;

	return nested_svm_vmrun(vcpu);
}
/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
static int svm_instr_opcode(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
		return NONE_SVM_INSTR;

	switch (ctxt->modrm) {
	case 0xd8: /* VMRUN */
		return SVM_INSTR_VMRUN;
	case 0xda: /* VMLOAD */
		return SVM_INSTR_VMLOAD;
	case 0xdb: /* VMSAVE */
		return SVM_INSTR_VMSAVE;
	default:
		break;
	}

	return NONE_SVM_INSTR;
}

static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
{
	const int guest_mode_exit_codes[] = {
		[SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
		[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
		[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
	};
	int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
		[SVM_INSTR_VMRUN] = vmrun_interception,
		[SVM_INSTR_VMLOAD] = vmload_interception,
		[SVM_INSTR_VMSAVE] = vmsave_interception,
	};
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	if (is_guest_mode(vcpu)) {
		/* Returns '1' or -errno on failure, '0' on success. */
		ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
		if (ret)
			return ret;
		return 1;
	}
	return svm_instr_handlers[opcode](vcpu);
}
/*
 * #GP handling code. Note that #GP can be triggered under the following two
 * cases:
 *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
 *      some AMD CPUs when EAX of these instructions are in the reserved memory
 *      regions (e.g. SMM memory on host).
 *   2) VMware backdoor
 */
static int gp_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 error_code = svm->vmcb->control.exit_info_1;
	int opcode;

	/* Both #GP cases have zero error_code */
	if (error_code)
		goto reinject;

	/* Decode the instruction for usage later */
	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
		goto reinject;

	opcode = svm_instr_opcode(vcpu);

	if (opcode == NONE_SVM_INSTR) {
		if (!enable_vmware_backdoor)
			goto reinject;

		/*
		 * VMware backdoor emulation on #GP interception only handles
		 * IN{S}, OUT{S}, and RDPMC.
		 */
		if (!is_guest_mode(vcpu))
			return kvm_emulate_instruction(vcpu,
				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
	} else {
		/* All SVM instructions expect page aligned RAX */
		if (svm->vmcb->save.rax & ~PAGE_MASK)
			goto reinject;

		return emulate_svm_instr(vcpu, opcode);
	}

reinject:
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
	return 1;
}
void svm_set_gif(struct vcpu_svm *svm, bool value)
{
	if (value) {
		/*
		 * If VGIF is enabled, the STGI intercept is only added to
		 * detect the opening of the SMI/NMI window; remove it now.
		 * Likewise, clear the VINTR intercept, we will set it
		 * again while processing KVM_REQ_EVENT if needed.
		 */
		if (vgif)
			svm_clr_intercept(svm, INTERCEPT_STGI);
		if (svm_is_intercept(svm, INTERCEPT_VINTR))
			svm_clear_vintr(svm);

		enable_gif(svm);
		if (svm->vcpu.arch.smi_pending ||
		    svm->vcpu.arch.nmi_pending ||
		    kvm_cpu_has_injectable_intr(&svm->vcpu) ||
		    kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	} else {
		disable_gif(svm);

		/*
		 * After a CLGI no interrupts should come.  But if vGIF is
		 * in use, we still rely on the VINTR intercept (rather than
		 * STGI) to detect an open interrupt window.
		 */
		if (!vgif)
			svm_clear_vintr(svm);
	}
}

static int stgi_interception(struct kvm_vcpu *vcpu)
{
	int ret;

	if (nested_svm_check_permissions(vcpu))
		return 1;

	ret = kvm_skip_emulated_instruction(vcpu);
	svm_set_gif(to_svm(vcpu), true);
	return ret;
}

static int clgi_interception(struct kvm_vcpu *vcpu)
{
	int ret;

	if (nested_svm_check_permissions(vcpu))
		return 1;

	ret = kvm_skip_emulated_instruction(vcpu);
	svm_set_gif(to_svm(vcpu), false);
	return ret;
}
static int invlpga_interception(struct kvm_vcpu *vcpu)
{
	gva_t gva = kvm_rax_read(vcpu);
	u32 asid = kvm_rcx_read(vcpu);

	/* FIXME: Handle an address size prefix. */
	if (!is_long_mode(vcpu))
		gva = (u32)gva;

	trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, gva);

	return kvm_skip_emulated_instruction(vcpu);
}

static int skinit_interception(struct kvm_vcpu *vcpu)
{
	trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));

	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
static int task_switch_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			vcpu->arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
		case SVM_EXITINTINFO_TYPE_SOFT:
			kvm_clear_interrupt_queue(vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
		if (!svm_skip_emulated_instruction(vcpu))
			return 0;
	}

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
			       has_error_code, error_code);
}
static void svm_clr_iret_intercept(struct vcpu_svm *svm)
{
	if (!sev_es_guest(svm->vcpu.kvm))
		svm_clr_intercept(svm, INTERCEPT_IRET);
}

static void svm_set_iret_intercept(struct vcpu_svm *svm)
{
	if (!sev_es_guest(svm->vcpu.kvm))
		svm_set_intercept(svm, INTERCEPT_IRET);
}

static int iret_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON_ONCE(sev_es_guest(vcpu->kvm));

	++vcpu->stat.nmi_window_exits;
	svm->awaiting_iret_completion = true;

	svm_clr_iret_intercept(svm);
	svm->nmi_iret_rip = kvm_rip_read(vcpu);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 1;
}

static int invlpg_interception(struct kvm_vcpu *vcpu)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return kvm_emulate_instruction(vcpu, 0);

	kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
	return kvm_skip_emulated_instruction(vcpu);
}

static int emulate_on_interception(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_instruction(vcpu, 0);
}

static int rsm_interception(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
}
static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
					    unsigned long val)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long cr0 = vcpu->arch.cr0;
	bool ret = false;

	if (!is_guest_mode(vcpu) ||
	    (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}
#define CR_VALID (1ULL << 63)

static int cr_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(vcpu);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(vcpu);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
		cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
	else
		cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(vcpu, val))
				err = kvm_set_cr0(vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(vcpu);
			break;
		case 2:
			val = vcpu->arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(vcpu);
			break;
		case 4:
			val = kvm_read_cr4(vcpu);
			break;
		case 8:
			val = kvm_get_cr8(vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(vcpu, reg, val);
		trace_kvm_cr_read(cr, val);
	}
	return kvm_complete_insn_gp(vcpu, err);
}
static int cr_trap(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long old_value, new_value;
	unsigned long cr;
	int ret = 0;

	new_value = (unsigned long)svm->vmcb->control.exit_info_1;

	cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
	switch (cr) {
	case 0:
		old_value = kvm_read_cr0(vcpu);
		svm_set_cr0(vcpu, new_value);

		kvm_post_set_cr0(vcpu, old_value, new_value);
		break;
	case 4:
		old_value = kvm_read_cr4(vcpu);
		svm_set_cr4(vcpu, new_value);

		kvm_post_set_cr4(vcpu, old_value, new_value);
		break;
	case 8:
		ret = kvm_set_cr8(vcpu, new_value);
		break;
	default:
		WARN(1, "unhandled CR%d write trap", cr);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	return kvm_complete_insn_gp(vcpu, ret);
}
static int dr_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int reg, dr;
	unsigned long val;
	int err = 0;

	/*
	 * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT
	 * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early.
	 */
	if (sev_es_guest(vcpu->kvm))
		return 1;

	if (vcpu->guest_debug == 0) {
		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		clr_dr_intercepts(svm);
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(vcpu);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
	if (dr >= 16) { /* mov to DRn */
		dr -= 16;
		val = kvm_register_read(vcpu, reg);
		err = kvm_set_dr(vcpu, dr, val);
	} else {
		kvm_get_dr(vcpu, dr, &val);
		kvm_register_write(vcpu, reg, val);
	}

	return kvm_complete_insn_gp(vcpu, err);
}
static int cr8_write_interception(struct kvm_vcpu *vcpu)
{
	int r;

	u8 cr8_prev = kvm_get_cr8(vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(vcpu);
	if (lapic_in_kernel(vcpu))
		return r;
	if (cr8_prev <= kvm_get_cr8(vcpu))
		return r;
	vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

static int efer_trap(struct kvm_vcpu *vcpu)
{
	struct msr_data msr_info;
	int ret;

	/*
	 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
	 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
	 * whether the guest has X86_FEATURE_SVM - this avoids a failure if
	 * the guest doesn't have X86_FEATURE_SVM.
	 */
	msr_info.host_initiated = false;
	msr_info.index = MSR_EFER;
	msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
	ret = kvm_set_msr_common(vcpu, &msr_info);

	return kvm_complete_insn_gp(vcpu, ret);
}
static int svm_get_msr_feature(struct kvm_msr_entry *msr)
{
	msr->data = 0;

	switch (msr->index) {
	case MSR_AMD64_DE_CFG:
		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
			msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
		break;
	default:
		return KVM_MSR_RET_INVALID;
	}

	return 0;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (msr_info->index) {
	case MSR_AMD64_TSC_RATIO:
		if (!msr_info->host_initiated &&
		    !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR))
			return 1;
		msr_info->data = svm->tsc_ratio_msr;
		break;
	case MSR_STAR:
		msr_info->data = svm->vmcb01.ptr->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		msr_info->data = svm->vmcb01.ptr->save.lstar;
		break;
	case MSR_CSTAR:
		msr_info->data = svm->vmcb01.ptr->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		msr_info->data = svm->vmcb01.ptr->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
		if (guest_cpuid_is_intel(vcpu))
			msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
		if (guest_cpuid_is_intel(vcpu))
			msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
		break;
	case MSR_TSC_AUX:
		msr_info->data = svm->tsc_aux;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		msr_info->data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		msr_info->data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			msr_info->data = svm->vmcb->save.spec_ctrl;
		else
			msr_info->data = svm->spec_ctrl;
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		msr_info->data = svm->virt_spec_ctrl;
		break;
	case MSR_F15H_IC_CFG: {

		int family, model;

		family = guest_cpuid_family(vcpu);
		model  = guest_cpuid_model(vcpu);

		if (family < 0 || model < 0)
			return kvm_get_msr_common(vcpu, msr_info);

		msr_info->data = 0;

		if (family == 0x15 &&
		    (model >= 0x2 && model < 0x20))
			msr_info->data = 0x1E;
		}
		break;
	case MSR_AMD64_DE_CFG:
		msr_info->data = svm->msr_decfg;
		break;
	default:
		return kvm_get_msr_common(vcpu, msr_info);
	}
	return 0;
}
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
		return kvm_complete_insn_gp(vcpu, err);

	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
				X86_TRAP_GP |
				SVM_EVTINJ_TYPE_EXEPT |
				SVM_EVTINJ_VALID);
	return 1;
}
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret = 0;

	u32 ecx = msr->index;
	u64 data = msr->data;
	switch (ecx) {
	case MSR_AMD64_TSC_RATIO:

		if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) {

			if (!msr->host_initiated)
				return 1;
			/*
			 * In case TSC scaling is not enabled, always
			 * leave this MSR at the default value.
			 *
			 * Due to bug in qemu 6.2.0, it would try to set
			 * this msr to 0 if tsc scaling is not enabled.
			 * Ignore this value as well.
			 */
			if (data != 0 && data != svm->tsc_ratio_msr)
				return 1;
			break;
		}

		if (data & SVM_TSC_RATIO_RSVD)
			return 1;

		svm->tsc_ratio_msr = data;

		if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
		    is_guest_mode(vcpu))
			nested_svm_update_tsc_ratio_msr(vcpu);

		break;
	case MSR_IA32_CR_PAT:
		ret = kvm_set_msr_common(vcpu, msr);
		if (ret)
			break;

		svm->vmcb01.ptr->save.g_pat = data;
		if (is_guest_mode(vcpu))
			nested_vmcb02_compute_g_pat(svm);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (kvm_spec_ctrl_test_value(data))
			return 1;

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			svm->vmcb->save.spec_ctrl = data;
		else
			svm->spec_ctrl = data;
		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_svm_vmrun_msrpm.
		 * We update the L1 MSR bit as well since it will end up
		 * touching the MSR anyway now.
		 */
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		if (data & ~SPEC_CTRL_SSBD)
			return 1;

		svm->virt_spec_ctrl = data;
		break;
	case MSR_STAR:
		svm->vmcb01.ptr->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb01.ptr->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb01.ptr->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb01.ptr->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb01.ptr->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb01.ptr->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
		/*
		 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs
		 * when we spoof an Intel vendor ID (for cross vendor migration).
		 * In this case we use this intercept to track the high
		 * 32 bit part of these msrs to support Intel's
		 * implementation of SYSENTER/SYSEXIT.
		 */
		svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_TSC_AUX:
		/*
		 * TSC_AUX is always virtualized for SEV-ES guests when the
		 * feature is available. The user return MSR support is not
		 * required in this case because TSC_AUX is restored on #VMEXIT
		 * from the host save area (which has been initialized in
		 * svm_hardware_enable()).
		 */
		if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
			break;

		/*
		 * TSC_AUX is usually changed only during boot and never read
		 * directly.  Intercept TSC_AUX instead of exposing it to the
		 * guest via direct_access_msrs, and switch it via user return.
		 */
		preempt_disable();
		ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
		preempt_enable();
		if (ret)
			break;

		svm->tsc_aux = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!lbrv) {
			kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm_get_lbr_vmcb(svm)->save.dbgctl = data;
		svm_update_lbrv(vcpu);
		break;
	case MSR_VM_HSAVE_PA:
		/*
		 * Old kernels did not validate the value written to
		 * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
		 * value to allow live migrating buggy or malicious guests
		 * originating from those kernels.
		 */
		if (!msr->host_initiated && !page_address_valid(vcpu, data))
			return 1;

		svm->nested.hsave_msr = data & PAGE_MASK;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
		break;
	case MSR_AMD64_DE_CFG: {
		struct kvm_msr_entry msr_entry;

		msr_entry.index = msr->index;
		if (svm_get_msr_feature(&msr_entry))
			return 1;

		/* Check the supported bits */
		if (data & ~msr_entry.data)
			return 1;

		/* Don't allow the guest to change a bit, #GP */
		if (!msr->host_initiated && (data ^ msr_entry.data))
			return 1;

		svm->msr_decfg = data;
		break;
	}
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return ret;
}
static int msr_interception(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_info_1)
		return kvm_emulate_wrmsr(vcpu);
	else
		return kvm_emulate_rdmsr(vcpu);
}

static int interrupt_window_interception(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	svm_clear_vintr(to_svm(vcpu));

	/*
	 * If not running nested, for AVIC, the only reason to end up here is ExtINTs.
	 * In this case AVIC was temporarily disabled for
	 * requesting the IRQ window and we have to re-enable it.
	 *
	 * If running nested, still remove the VM wide AVIC inhibit to
	 * support case in which the interrupt window was requested when the
	 * vCPU was not running nested.
	 *
	 * All vCPUs which run still run nested, will remain to have their
	 * AVIC still inhibited due to per-cpu AVIC inhibition.
	 */
	kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

	++vcpu->stat.irq_window_exits;
	return 1;
}

static int pause_interception(struct kvm_vcpu *vcpu)
{
	bool in_kernel;

	/*
	 * CPL is not made available for an SEV-ES guest, therefore
	 * vcpu->arch.preempted_in_kernel can never be true.  Just
	 * set in_kernel to false as well.
	 */
	in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;

	grow_ple_window(vcpu);

	kvm_vcpu_on_spin(vcpu, in_kernel);
	return kvm_skip_emulated_instruction(vcpu);
}

static int invpcid_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long type;
	gva_t gva;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/*
	 * For an INVPCID intercept:
	 * EXITINFO1 provides the linear address of the memory operand.
	 * EXITINFO2 provides the contents of the register operand.
	 */
	type = svm->vmcb->control.exit_info_2;
	gva = svm->vmcb->control.exit_info_1;

	return kvm_handle_invpcid(vcpu, type, gva);
}
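
/*
 * Dispatch table mapping SVM exit codes to their handlers.  Exit codes
 * without an entry here are rejected by svm_check_exit_valid() and
 * reported to userspace as an unexpected exit reason.
 */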
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
	[SVM_EXIT_EXCP_BASE + GP_VECTOR]	= gp_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= smi_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= kvm_emulate_rdpmc,
	[SVM_EXIT_CPUID]			= kvm_emulate_cpuid,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= kvm_emulate_invd,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= kvm_emulate_halt,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= kvm_emulate_hypercall,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_RDTSCP]			= kvm_handle_invalid_op,
	[SVM_EXIT_WBINVD]			= kvm_emulate_wbinvd,
	[SVM_EXIT_MONITOR]			= kvm_emulate_monitor,
	[SVM_EXIT_MWAIT]			= kvm_emulate_mwait,
	[SVM_EXIT_XSETBV]			= kvm_emulate_xsetbv,
	[SVM_EXIT_RDPRU]			= kvm_handle_invalid_op,
	[SVM_EXIT_EFER_WRITE_TRAP]		= efer_trap,
	[SVM_EXIT_CR0_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR4_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR8_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_INVPCID]			= invpcid_interception,
	[SVM_EXIT_NPF]				= npf_interception,
	[SVM_EXIT_RSM]				= rsm_interception,
	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
	[SVM_EXIT_VMGEXIT]			= sev_handle_vmgexit,
};
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;
	struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;

	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
	       svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
	pr_err("%-20s%08x %08x\n", "intercepts:",
	       control->intercepts[INTERCEPT_WORD3],
	       control->intercepts[INTERCEPT_WORD4]);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%d\n", "pause filter threshold:",
	       control->pause_filter_thresh);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
	pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "es:",
	       save->es.selector, save->es.attrib, save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "cs:",
	       save->cs.selector, save->cs.attrib, save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ss:",
	       save->ss.selector, save->ss.attrib, save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ds:",
	       save->ds.selector, save->ds.attrib, save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "fs:",
	       save01->fs.selector, save01->fs.attrib, save01->fs.limit, save01->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gs:",
	       save01->gs.selector, save01->gs.attrib, save01->gs.limit, save01->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib, save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ldtr:",
	       save01->ldtr.selector, save01->ldtr.attrib, save01->ldtr.limit, save01->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "idtr:",
	       save->idtr.selector, save->idtr.attrib, save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "tr:",
	       save01->tr.selector, save01->tr.attrib, save01->tr.limit, save01->tr.base);
	pr_err("vmpl: %d   cpl:  %d               efer:          %016llx\n",
	       save->vmpl, save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save01->star, "lstar:", save01->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save01->cstar, "sfmask:", save01->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save01->kernel_gs_base,
	       "sysenter_cs:", save01->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save01->sysenter_esp,
	       "sysenter_eip:", save01->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}
static bool svm_check_exit_valid(u64 exit_code)
{
	return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
		svm_exit_handlers[exit_code]);
}

static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
{
	vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
	return 0;
}

int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
{
	if (!svm_check_exit_valid(exit_code))
		return svm_handle_invalid_exit(vcpu, exit_code);

#ifdef CONFIG_RETPOLINE
	if (exit_code == SVM_EXIT_MSR)
		return msr_interception(vcpu);
	else if (exit_code == SVM_EXIT_VINTR)
		return interrupt_window_interception(vcpu);
	else if (exit_code == SVM_EXIT_INTR)
		return intr_interception(vcpu);
	else if (exit_code == SVM_EXIT_HLT)
		return kvm_emulate_halt(vcpu);
	else if (exit_code == SVM_EXIT_NPF)
		return npf_interception(vcpu);
#endif
	return svm_exit_handlers[exit_code](vcpu);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
			      u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*reason = control->exit_code;
	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
	*intr_info = control->exit_int_info;
	if ((*intr_info & SVM_EXITINTINFO_VALID) &&
	    (*intr_info & SVM_EXITINTINFO_VALID_ERR))
		*error_code = control->exit_int_info_err;
	else
		*error_code = 0;
}
static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	/* SEV-ES guests must use the CR write traps to track CR registers. */
	if (!sev_es_guest(vcpu->kvm)) {
		if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
			vcpu->arch.cr0 = svm->vmcb->save.cr0;
		if (npt_enabled)
			vcpu->arch.cr3 = svm->vmcb->save.cr3;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
		return 0;
	}

	if (exit_fastpath != EXIT_FASTPATH_NONE)
		return 1;

	return svm_invoke_exit_handler(vcpu, exit_code);
}
static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If the previous vmrun of the vmcb occurred on a different physical
	 * cpu, then mark the vmcb dirty and assign a new asid.  Hardware's
	 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
	 */
	if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
		svm->current_vmcb->asid_generation = 0;
		vmcb_mark_all_dirty(svm->vmcb);
		svm->current_vmcb->cpu = vcpu->cpu;
	}

	if (sev_guest(vcpu->kvm))
		return pre_sev_run(svm, vcpu->cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->current_vmcb->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	if (svm->nmi_l1_to_l2)
		return;

	svm->nmi_masked = true;
	svm_set_iret_intercept(svm);
	++vcpu->stat.nmi_injections;
}
static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!is_vnmi_enabled(svm))
		return false;

	return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
}

static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!is_vnmi_enabled(svm))
		return false;

	if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
		return false;

	svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);

	/*
	 * Because the pending NMI is serviced by hardware, KVM can't know when
	 * the NMI is "injected", but for all intents and purposes, passing the
	 * NMI off to hardware counts as injection.
	 */
	++vcpu->stat.nmi_injections;

	return true;
}

static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 type;

	if (vcpu->arch.interrupt.soft) {
		if (svm_update_soft_interrupt_rip(vcpu))
			return;

		type = SVM_EVTINJ_TYPE_SOFT;
	} else {
		type = SVM_EVTINJ_TYPE_INTR;
	}

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
			   vcpu->arch.interrupt.soft, reinjected);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
				       SVM_EVTINJ_VALID | type;
}
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vector)
{
	/*
	 * apic->apicv_active must be read after vcpu->mode.
	 * Pairs with smp_store_release in vcpu_enter_guest.
	 */
	bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);

	/* Note, this is called iff the local APIC is in-kernel. */
	if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
		/* Process the interrupt via kvm_check_and_inject_events(). */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		return;
	}

	trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
	if (in_guest_mode) {
		/*
		 * Signal the doorbell to tell hardware to inject the IRQ.  If
		 * the vCPU exits the guest before the doorbell chimes, hardware
		 * will automatically process AVIC interrupts at the next VMRUN.
		 */
		avic_ring_doorbell(vcpu);
	} else {
		/*
		 * Wake the vCPU if it was blocking.  KVM will then detect the
		 * pending IRQ when checking if the vCPU has a wake event.
		 */
		kvm_vcpu_wake_up(vcpu);
	}
}

static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
				  int trig_mode, int vector)
{
	kvm_lapic_set_irr(vector, apic);

	/*
	 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
	 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
	 * the read of guest_mode.  This guarantees that either VMRUN will see
	 * and process the new vIRR entry, or that svm_complete_interrupt_delivery
	 * will signal the doorbell if the CPU has already entered the guest.
	 */
	smp_mb__after_atomic();
	svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
}

static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
	if (sev_es_guest(vcpu->kvm))
		return;

	if (nested_svm_virtualize_tpr(vcpu))
		return;

	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	if (tpr >= irr)
		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_vnmi_enabled(svm))
		return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
	else
		return svm->nmi_masked;
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_vnmi_enabled(svm)) {
		if (masked)
			svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
		else
			svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;

	} else {
		svm->nmi_masked = masked;
		if (masked)
			svm_set_iret_intercept(svm);
		else
			svm_clr_iret_intercept(svm);
	}
}

bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (!gif_set(svm))
		return true;

	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return false;

	if (svm_get_nmi_mask(vcpu))
		return true;

	return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_nmi_blocked(vcpu))
		return 0;

	/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return -EBUSY;

	return 1;
}
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (!gif_set(svm))
		return true;

	if (is_guest_mode(vcpu)) {
		/* As long as interrupts are being delivered...  */
		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
		    ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
			return true;

		/* ... vmexits aren't blocked by the interrupt shadow  */
		if (nested_exit_on_intr(svm))
			return false;
	} else {
		if (!svm_get_if_flag(vcpu))
			return true;
	}

	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_interrupt_blocked(vcpu))
		return 0;

	/*
	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
	 * e.g. if the IRQ arrived asynchronously after checking nested events.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
		return -EBUSY;

	return 1;
}

static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept. However, if the vGIF feature is
	 * enabled, the STGI interception will not occur. Enable the irq
	 * window under the assumption that the hardware will set the GIF.
	 */
	if (vgif || gif_set(svm)) {
		/*
		 * IRQ window is not needed when AVIC is enabled,
		 * unless we have pending ExtINT since it cannot be injected
		 * via AVIC. In such case, KVM needs to temporarily disable AVIC,
		 * and fallback to injecting IRQ via V_IRQ.
		 *
		 * If running nested, AVIC is already locally inhibited
		 * on this vCPU, therefore there is no need to request
		 * the VM wide AVIC inhibition.
		 */
		if (!is_guest_mode(vcpu))
			kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

		svm_set_vintr(svm);
	}
}
static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * KVM should never request an NMI window when vNMI is enabled, as KVM
	 * allows at most one to-be-injected NMI and one pending NMI, i.e. if
	 * two NMIs arrive simultaneously, KVM will inject one and set
	 * V_NMI_PENDING for the other.  WARN, but continue with the standard
	 * single-step approach to try and salvage the pending NMI.
	 */
	WARN_ON_ONCE(is_vnmi_enabled(svm));

	if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
		return; /* IRET will cause a vm exit */

	/*
	 * SEV-ES guests are responsible for signaling when a vCPU is ready to
	 * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
	 * KVM can't intercept and single-step IRET to detect when NMIs are
	 * unblocked (architecturally speaking).  See SVM_VMGEXIT_NMI_COMPLETE.
	 *
	 * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
	 * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
	 * supported NAEs in the GHCB protocol.
	 */
	if (sev_es_guest(vcpu->kvm))
		return;

	if (!gif_set(svm)) {
		if (vgif)
			svm_set_intercept(svm, INTERCEPT_STGI);
		return; /* STGI will cause a vm exit */
	}

	/*
	 * Something prevents NMI from been injected. Single step over possible
	 * problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}
static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
	 * A TLB flush for the current ASID flushes both "host" and "guest" TLB
	 * entries, and thus is a superset of Hyper-V's fine grained flushing.
	 */
	kvm_hv_vcpu_purge_flush_tlb(vcpu);

	/*
	 * Flush only the current ASID even if the TLB flush was invoked via
	 * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
	 * unconditionally does a TLB flush on both nested VM-Enter and nested
	 * VM-Exit (via kvm_mmu_reset_context()).
	 */
	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->current_vmcb->asid_generation--;
}

static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	hpa_t root_tdp = vcpu->arch.mmu->root.hpa;

	/*
	 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
	 * flush the NPT mappings via hypercall as flushing the ASID only
	 * affects virtual to physical mappings, it does not invalidate guest
	 * physical to host physical mappings.
	 */
	if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
		hyperv_flush_guest_mapping(root_tdp);

	svm_flush_tlb_asid(vcpu);
}

static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	/*
	 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
	 * flushes should be routed to hv_flush_remote_tlbs() without requesting
	 * a "regular" remote flush.  Reaching this point means either there's
	 * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
	 * which might be fatal to the guest.  Yell, but try to recover.
	 */
	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
		hv_flush_remote_tlbs(vcpu->kvm);

	svm_flush_tlb_asid(vcpu);
}

static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	invlpga(gva, svm->vmcb->control.asid);
}
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nested_svm_virtualize_tpr(vcpu))
		return;

	if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (nested_svm_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
					int type)
{
	bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
	bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
	 * associated with the original soft exception/interrupt.  next_rip is
	 * cleared on all exits that can occur while vectoring an event, so KVM
	 * needs to manually set next_rip for re-injection.  Unlike the !nrips
	 * case below, this needs to be done if and only if KVM is re-injecting
	 * the same event, i.e. if the event is a soft exception/interrupt,
	 * otherwise next_rip is unused on VMRUN.
	 */
	if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
	    kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
		svm->vmcb->control.next_rip = svm->soft_int_next_rip;
	/*
	 * If NRIPS isn't enabled, KVM must manually advance RIP prior to
	 * injecting the soft exception/interrupt.  That advancement needs to
	 * be unwound if vectoring didn't complete.  Note, the new event may
	 * not be the injected event, e.g. if KVM injected an INTn, the INTn
	 * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
	 * be the reported vectored event, but RIP still needs to be unwound.
	 */
	else if (!nrips && (is_soft || is_exception) &&
		 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
		kvm_rip_write(vcpu, svm->soft_int_old_rip);
}
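
/*
 * Post-#VMEXIT bookkeeping: decode exit_int_info and re-queue whatever
 * event (NMI, exception or interrupt) was being vectored when the exit
 * occurred, so it can be re-injected on the next VMRUN.
 */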
static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
	bool soft_int_injected = svm->soft_int_injected;

	svm->nmi_l1_to_l2 = false;
	svm->soft_int_injected = false;

	/*
	 * If we've made progress since setting awaiting_iret_completion, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if (svm->awaiting_iret_completion &&
	    kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
		svm->awaiting_iret_completion = false;
		svm->nmi_masked = false;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	if (soft_int_injected)
		svm_complete_soft_interrupt(vcpu, vector, type);

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		vcpu->arch.nmi_injected = true;
		svm->nmi_l1_to_l2 = nmi_l1_to_l2;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * Never re-inject a #VC exception.
		 */
		if (vector == X86_TRAP_VC)
			break;

		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(vcpu, vector, err);
		} else
			kvm_requeue_exception(vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(vcpu, vector, false);
		break;
	case SVM_EXITINTINFO_TYPE_SOFT:
		kvm_queue_interrupt(vcpu, vector, true);
		break;
	default:
		break;
	}
}
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(vcpu);
}

static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	return 1;
}

static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
	    to_svm(vcpu)->vmcb->control.exit_info_1)
		return handle_fastpath_set_msr_irqoff(vcpu);

	return EXIT_FASTPATH_NONE;
}
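
/*
 * The low-level VMRUN wrapper below runs with IRQs disabled and host
 * instrumentation suppressed (noinstr).  SEV-ES guests use a dedicated
 * entry point because most of their register state lives in the
 * encrypted VMSA rather than in the VMCB save area.
 */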
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	guest_state_enter_irqoff();

	amd_clear_divider();

	if (sev_es_guest(vcpu->kvm))
		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
	else
		__svm_vcpu_run(svm, spec_ctrl_intercepted);

	guest_state_exit_irqoff();
}
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);

	trace_kvm_entry(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * Disable singlestep if we're injecting an interrupt/exception.
	 * We don't want our modified rflags to be pushed on the stack where
	 * we might not be able to easily reset them if we disabled NMI
	 * singlestep later.
	 */
	if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
		/*
		 * Event injection happens before external interrupts cause a
		 * vmexit and interrupts are disabled here, so smp_send_reschedule
		 * is enough to force an immediate vmexit.
		 */
		disable_nmi_singlestep(svm);
		smp_send_reschedule(vcpu->cpu);
	}

	pre_svm_run(vcpu);

	sync_lapic_to_cr8(vcpu);

	if (unlikely(svm->asid != svm->vmcb->control.asid)) {
		svm->vmcb->control.asid = svm->asid;
		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
	}
	svm->vmcb->save.cr2 = vcpu->arch.cr2;

	svm_hv_update_vp_id(svm->vmcb, vcpu);

	/*
	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
	 * of a #DB.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
		svm_set_dr6(svm, vcpu->arch.dr6);
	else
		svm_set_dr6(svm, DR6_ACTIVE_LOW);

	clgi();
	kvm_load_guest_xsave_state(vcpu);

	kvm_wait_lapic_expire(vcpu);

	/*
	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
	 * is no need to worry about the conditional branch over the wrmsr
	 * being speculatively taken.
	 */
	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);

	svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);

	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
		x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);

	if (!sev_es_guest(vcpu->kvm)) {
		vcpu->arch.cr2 = svm->vmcb->save.cr2;
		vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
		vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
		vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
	}
	vcpu->arch.regs_dirty = 0;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);

	kvm_load_host_xsave_state(vcpu);
	stgi();

	/* Any pending NMI will happen here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_interrupt(vcpu);

	sync_cr8_to_lapic(vcpu);

	if (is_guest_mode(vcpu)) {
		nested_sync_control_from_vmcb02(svm);

		/* Track VMRUNs that have made past consistency checking */
		if (svm->nested.nested_run_pending &&
		    svm->vmcb->control.exit_code != SVM_EXIT_ERR)
			++vcpu->stat.nested_run;

		svm->nested.nested_run_pending = 0;
	}

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	vmcb_mark_all_clean(svm->vmcb);

	/* if exit due to PF check for async PF */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		vcpu->arch.apf.host_apf_flags =
			kvm_read_and_reset_apf_flags();

	vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
	 * change the physical cpu
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(vcpu);

	trace_kvm_exit(vcpu, KVM_ISA_SVM);

	svm_complete_interrupts(vcpu);

	if (is_guest_mode(vcpu))
		return EXIT_FASTPATH_NONE;

	return svm_exit_handlers_fastpath(vcpu);
}
static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long cr3;

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);

		hv_track_root_tdp(vcpu, root_hpa);

		cr3 = vcpu->arch.cr3;
	} else if (root_level >= PT64_ROOT_4LEVEL) {
		cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
	} else {
		/* PCID in the guest should be impossible with a 32-bit MMU. */
		WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
		cr3 = root_hpa;
	}

	svm->vmcb->save.cr3 = cr3;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
}

static void svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}
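/*
 * 0x0f 0x01 0xd9 encodes VMMCALL, the AMD hypercall instruction; the Intel
 * counterpart, VMCALL, is 0x0f 0x01 0xc1, hence the vendor-specific patching.
 */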
/*
 * The kvm parameter can be NULL (module initialization, or invocation before
 * VM creation). Be sure to check the kvm parameter before using it.
 */
static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
{
	switch (index) {
	case MSR_IA32_MCG_EXT_CTL:
	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
		return false;
	case MSR_IA32_SMBASE:
		if (!IS_ENABLED(CONFIG_KVM_SMM))
			return false;
		/* SEV-ES guests do not support SMM, so report false */
		if (kvm && sev_es_guest(kvm))
			return false;
		break;
	default:
		break;
	}

	return true;
}
static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * SVM doesn't provide a way to disable just XSAVES in the guest, KVM
	 * can only disable all variants by disallowing CR4.OSXSAVE from
	 * being set.  As a result, if the host has XSAVE and XSAVES, and the
	 * guest has XSAVE enabled, the guest can execute XSAVES without
	 * faulting.  Treat XSAVES as enabled in this case regardless of
	 * whether it's advertised to the guest so that KVM context switches
	 * XSS on VM-Enter/VM-Exit.  Failure to do so would effectively give
	 * the guest read/write access to the host's XSS.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVE) &&
	    boot_cpu_has(X86_FEATURE_XSAVES) &&
	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
		kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);

	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);

	/*
	 * Intercept VMLOAD if the vCPU vendor is Intel in order to emulate that
	 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
	 * SVM on Intel is bonkers and extremely unlikely to work).
	 */
	if (!guest_cpuid_is_intel(vcpu))
		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);

	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);

	svm_recalc_instruction_intercepts(vcpu, svm);

	if (boot_cpu_has(X86_FEATURE_IBPB))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
				     !!guest_has_pred_cmd_msr(vcpu));

	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
				     !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));

	if (sev_guest(vcpu->kvm))
		sev_vcpu_after_set_cpuid(svm);

	init_vmcb_after_set_cpuid(vcpu);
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}
#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_xsetbv]		= PRE_EX(SVM_EXIT_XSETBV),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM
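/*
 * The stage recorded for each entry above is compared against the stage
 * svm_check_intercept() is called at: e.g. CPUID, PAUSE, RSM, INT n and IRET
 * are checked before exception checks (PRE_EX), most instructions after them
 * (POST_EX), and MONITOR only after its memory access (POST_MEM).
 */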
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		if (!(vmcb12_is_intercept(&svm->nested.ctl,
					  INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * We get this for NOP only, but PAUSE is
		 * REP NOP, so check for the REP prefix here.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
		vcpu->arch.at_instruction_boundary = true;
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);
}

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* [63:9] are reserved. */
	vcpu->arch.mcg_cap &= 0x1ff;
}
#ifdef CONFIG_KVM_SMM
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Per APM Vol.2 15.22.2 "Response to SMI" */
	if (!gif_set(svm))
		return true;

	return is_smm(vcpu);
}

static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return -EBUSY;

	if (svm_smi_blocked(vcpu))
		return 0;

	/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
		return -EBUSY;

	return 1;
}
static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map_save;
	int ret;

	if (!is_guest_mode(vcpu))
		return 0;

	/*
	 * 32-bit SMRAM format doesn't preserve EFER and SVM state.  Userspace is
	 * responsible for ensuring nested SVM and SMIs are mutually exclusive.
	 */
	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 1;

	smram->smram64.svm_guest_flag = 1;
	smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
	if (ret)
		return ret;

	/*
	 * KVM uses VMCB01 to store L1 host state while L2 runs but
	 * VMCB01 is going to be used during SMM and thus the state will
	 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
	 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
	 * format of the area is identical to guest save area offsetted
	 * by 0x400 (matches the offset of 'struct vmcb_save_area'
	 * within 'struct vmcb'). Note: HSAVE area may also be used by
	 * L1 hypervisor to save additional host context (e.g. KVM does
	 * that, see svm_prepare_switch_to_guest()) which must be
	 * preserved.
	 */
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
		return 1;

	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);

	svm_copy_vmrun_state(map_save.hva + 0x400,
			     &svm->vmcb01.ptr->save);

	kvm_vcpu_unmap(vcpu, &map_save, true);
	return 0;
}
static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map, map_save;
	struct vmcb *vmcb12;
	int ret;

	const struct kvm_smram_state_64 *smram64 = &smram->smram64;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 0;

	/* Non-zero if SMI arrived while vCPU was in guest mode. */
	if (!smram64->svm_guest_flag)
		return 0;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return 1;

	if (!(smram64->efer & EFER_SVME))
		return 1;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
		return 1;

	ret = 1;
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
		goto unmap_map;

	if (svm_allocate_nested(svm))
		goto unmap_save;

	/*
	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
	 * used during SMM (see svm_enter_smm())
	 */
	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);

	/*
	 * Enter the nested guest now
	 */
	vmcb_mark_all_dirty(svm->vmcb01.ptr);

	vmcb12 = map.hva;
	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
	ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);

	if (ret)
		goto unmap_save;

	svm->nested.nested_run_pending = 1;

unmap_save:
	kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
	kvm_vcpu_unmap(vcpu, &map, true);
	return ret;
}
static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!gif_set(svm)) {
		if (vgif)
			svm_set_intercept(svm, INTERCEPT_STGI);
		/* STGI will cause a vm exit */
	} else {
		/* We must be in SMM; RSM will cause a vmexit anyway.  */
	}
}
#endif /* CONFIG_KVM_SMM */
static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
					 void *insn, int insn_len)
{
	bool smep, smap, is_user;
	u64 error_code;

	/* Emulation is always possible when KVM has access to all guest state. */
	if (!sev_guest(vcpu->kvm))
		return X86EMUL_CONTINUE;

	/* #UD and #GP should never be intercepted for SEV guests. */
	WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
				  EMULTYPE_TRAP_UD_FORCED |
				  EMULTYPE_VMWARE_GP));

	/*
	 * Emulation is impossible for SEV-ES guests as KVM doesn't have access
	 * to guest register state.
	 */
	if (sev_es_guest(vcpu->kvm))
		return X86EMUL_RETRY_INSTR;

	/*
	 * Emulation is possible if the instruction is already decoded, e.g.
	 * when completing I/O after returning from userspace.
	 */
	if (emul_type & EMULTYPE_NO_DECODE)
		return X86EMUL_CONTINUE;

	/*
	 * Emulation is possible for SEV guests if and only if a prefilled
	 * buffer containing the bytes of the intercepted instruction is
	 * available. SEV guest memory is encrypted with a guest specific key
	 * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
	 * decode garbage.
	 *
	 * If KVM is NOT trying to simply skip an instruction, inject #UD if
	 * KVM reached this point without an instruction buffer. In practice,
	 * this path should never be hit by a well-behaved guest, e.g. KVM
	 * doesn't intercept #UD or #GP for SEV guests, but this path is still
	 * theoretically reachable, e.g. via unaccelerated fault-like AVIC
	 * access, and needs to be handled by KVM to avoid putting the guest
	 * into an infinite loop.  Injecting #UD is somewhat arbitrary, but
	 * it's the least awful option given lack of insight into the guest.
	 *
	 * If KVM is trying to skip an instruction, simply resume the guest.
	 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
	 * will attempt to re-inject the INT3/INTO and skip the instruction.
	 * In that scenario, retrying the INT3/INTO and hoping the guest will
	 * make forward progress is the only option that has a chance of
	 * success (and in practice it will work the vast majority of the time).
	 */
	if (unlikely(!insn)) {
		if (emul_type & EMULTYPE_SKIP)
			return X86EMUL_UNHANDLEABLE;

		kvm_queue_exception(vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/*
	 * Emulate for SEV guests if the insn buffer is not empty.  The buffer
	 * will be empty if the DecodeAssist microcode cannot fetch bytes for
	 * the faulting instruction because the code fetch itself faulted, e.g.
	 * the guest attempted to fetch from emulated MMIO or a guest page
	 * table used to translate CS:RIP resides in emulated MMIO.
	 */
	if (likely(insn_len))
		return X86EMUL_CONTINUE;

	/*
	 * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
	 *
	 * Errata:
	 * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
	 * possible that CPU microcode implementing DecodeAssist will fail to
	 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
	 * be '0'.  This happens because microcode reads CS:RIP using a _data_
	 * load uop with CPL=0 privileges.  If the load hits a SMAP #PF, ucode
	 * gives up and does not fill the instruction bytes buffer.
	 *
	 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
	 * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
	 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
	 * GuestIntrBytes field of the VMCB.
	 *
	 * This does _not_ mean that the erratum has been encountered, as the
	 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
	 * #PF, e.g. if the guest attempted to execute from emulated MMIO and
	 * encountered a reserved/not-present #PF.
	 *
	 * To hit the erratum, the following conditions must be true:
	 *    1. CR4.SMAP=1 (obviously).
	 *    2. CR4.SMEP=0 || CPL=3.  If SMEP=1 and CPL<3, the erratum cannot
	 *       have been hit as the guest would have encountered a SMEP
	 *       violation #PF, not a #NPF.
	 *    3. The #NPF is not due to a code fetch, in which case failure to
	 *       retrieve the instruction bytes is legitimate (see above).
	 *
	 * In addition, don't apply the erratum workaround if the #NPF occurred
	 * while translating guest page tables (see below).
	 */
	error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
	if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
		goto resume_guest;

	smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
	smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
	is_user = svm_get_cpl(vcpu) == 3;
	if (smap && (!smep || is_user)) {
		pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");

		/*
		 * If the fault occurred in userspace, arbitrarily inject #GP
		 * to avoid killing the guest and to hopefully avoid confusing
		 * the guest kernel too much, e.g. injecting #PF would not be
		 * coherent with respect to the guest's page tables.  Request
		 * triple fault if the fault occurred in the kernel as there's
		 * no fault that KVM can inject without confusing the guest.
		 * In practice, the triple fault is moot as no sane SEV kernel
		 * will execute from user memory while also running with SMAP=1.
		 */
		if (is_user)
			kvm_inject_gp(vcpu, 0);
		else
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return X86EMUL_PROPAGATE_FAULT;
	}

resume_guest:
	/*
	 * If the erratum was not hit, simply resume the guest and let it fault
	 * again.  While awful, e.g. the vCPU may get stuck in an infinite loop
	 * if the fault is at CPL=0, it's the lesser of all evils.  Exiting to
	 * userspace will kill the guest, and letting the emulator read garbage
	 * will yield random behavior and potentially corrupt the guest.
	 *
	 * Simply resuming the guest is technically not a violation of the SEV
	 * architecture.  AMD's APM states that all code fetches and page table
	 * accesses for SEV guest are encrypted, regardless of the C-Bit.  The
	 * APM also states that encrypted accesses to MMIO are "ignored", but
	 * doesn't explicitly define "ignored", i.e. doing nothing and letting
	 * the guest spin is technically "ignoring" the access.
	 */
	return X86EMUL_RETRY_INSTR;
}
static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !gif_set(svm);
}

static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	if (!sev_es_guest(vcpu->kvm))
		return kvm_vcpu_deliver_sipi_vector(vcpu, vector);

	sev_vcpu_deliver_sipi_vector(vcpu, vector);
}

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

static int svm_vm_init(struct kvm *kvm)
{
	if (!pause_filter_count || !pause_filter_thresh)
		kvm->arch.pause_in_guest = true;

	if (enable_apicv) {
		int ret = avic_vm_init(kvm);
		if (ret)
			return ret;
	}

	return 0;
}
static struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = KBUILD_MODNAME,

	.check_processor_compatibility = svm_check_processor_compat,

	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_vcpu_create,
	.vcpu_free = svm_vcpu_free,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_switch_to_guest = svm_prepare_switch_to_guest,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = avic_vcpu_blocking,
	.vcpu_unblocking = avic_vcpu_unblocking,

	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.is_valid_cr0 = svm_is_valid_cr0,
	.set_cr0 = svm_set_cr0,
	.post_set_cr3 = sev_post_set_cr3,
	.is_valid_cr4 = svm_is_valid_cr4,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,
	.get_if_flag = svm_get_if_flag,

	.flush_tlb_all = svm_flush_tlb_all,
	.flush_tlb_current = svm_flush_tlb_current,
	.flush_tlb_gva = svm_flush_tlb_gva,
	.flush_tlb_guest = svm_flush_tlb_asid,

	.vcpu_pre_run = svm_vcpu_pre_run,
	.vcpu_run = svm_vcpu_run,
	.handle_exit = svm_handle_exit,
	.skip_emulated_instruction = svm_skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.inject_irq = svm_inject_irq,
	.inject_nmi = svm_inject_nmi,
	.is_vnmi_pending = svm_is_vnmi_pending,
	.set_vnmi_pending = svm_set_vnmi_pending,
	.inject_exception = svm_inject_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = svm_enable_nmi_window,
	.enable_irq_window = svm_enable_irq_window,
	.update_cr8_intercept = svm_update_cr8_intercept,
	.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
	.apicv_post_state_restore = avic_apicv_post_state_restore,
	.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,

	.get_exit_info = svm_get_exit_info,

	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
	.write_tsc_offset = svm_write_tsc_offset,
	.write_tsc_multiplier = svm_write_tsc_multiplier,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.nested_ops = &svm_nested_ops,

	.deliver_interrupt = svm_deliver_interrupt,
	.pi_update_irte = avic_pi_update_irte,
	.setup_mce = svm_setup_mce,

#ifdef CONFIG_KVM_SMM
	.smi_allowed = svm_smi_allowed,
	.enter_smm = svm_enter_smm,
	.leave_smm = svm_leave_smm,
	.enable_smi_window = svm_enable_smi_window,
#endif

	.mem_enc_ioctl = sev_mem_enc_ioctl,
	.mem_enc_register_region = sev_mem_enc_register_region,
	.mem_enc_unregister_region = sev_mem_enc_unregister_region,
	.guest_memory_reclaimed = sev_guest_memory_reclaimed,

	.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
	.vm_move_enc_context_from = sev_vm_move_enc_context_from,

	.check_emulate_instruction = svm_check_emulate_instruction,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

	.msr_filter_changed = svm_msr_filter_changed,
	.complete_emulated_msr = svm_complete_emulated_msr,

	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
};
/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
	unsigned int enc_bit, mask_bit;
	u64 msr, mask;

	/* If there is no memory encryption support, use existing mask */
	if (cpuid_eax(0x80000000) < 0x8000001f)
		return;

	/* If memory encryption is not enabled, use existing mask */
	rdmsrl(MSR_AMD64_SYSCFG, msr);
	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
		return;

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	/* Increment the mask bit if it is the same as the encryption bit */
	if (enc_bit == mask_bit)
		mask_bit++;

	/*
	 * If the mask bit location is below 52, then some bits above the
	 * physical addressing limit will always be reserved, so use the
	 * rsvd_bits() function to generate the mask. This mask, along with
	 * the present bit, will be used to generate a page fault with
	 * PFERR.RSVD = 1.
	 *
	 * If the mask bit location is 52 (or above), then clear the mask.
	 */
	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}
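/*
 * Worked example (illustrative values only, not taken from any specific CPU):
 * with boot_cpu_data.x86_phys_bits = 48 and the C-bit reported at position 47,
 * mask_bit stays 48, so the MMIO SPTE mask becomes
 * rsvd_bits(48, 51) | PT_PRESENT_MASK and MMIO accesses fault with
 * PFERR.RSVD = 1 without colliding with the encryption bit.
 */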
static __init void svm_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	kvm_caps.supported_perf_cap = 0;
	kvm_caps.supported_xss = 0;

	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
	if (nested) {
		kvm_cpu_cap_set(X86_FEATURE_SVM);
		kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);

		if (nrips)
			kvm_cpu_cap_set(X86_FEATURE_NRIPS);

		if (npt_enabled)
			kvm_cpu_cap_set(X86_FEATURE_NPT);

		if (tsc_scaling)
			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);

		if (vls)
			kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
		if (lbrv)
			kvm_cpu_cap_set(X86_FEATURE_LBRV);

		if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
			kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);

		if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
			kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);

		if (vgif)
			kvm_cpu_cap_set(X86_FEATURE_VGIF);

		if (vnmi)
			kvm_cpu_cap_set(X86_FEATURE_VNMI);

		/* Nested VM can receive #VMEXIT instead of triggering #GP */
		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
	}

	/* CPUID 0x80000008 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	if (enable_pmu) {
		/*
		 * Enumerate support for PERFCTR_CORE if and only if KVM has
		 * access to enough counters to virtualize "core" support,
		 * otherwise limit vPMU support to the legacy number of counters.
		 */
		if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
			kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
							  kvm_pmu_cap.num_counters_gp);
		else
			kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);

		if (kvm_pmu_cap.version != 2 ||
		    !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
			kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
	}

	/* CPUID 0x8000001F (SME/SEV features) */
	sev_set_cpu_caps();
}
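/*
 * svm_set_cpu_caps() is invoked from svm_hardware_setup() below, after the
 * nrips/vls/vgif/vnmi/lbrv module parameters have been clamped to what the
 * CPU actually supports, so the SVM feature bits advertised to L1 reflect
 * the effective configuration rather than the raw module parameters.
 */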
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;
	unsigned int order = get_order(IOPM_SIZE);

	/*
	 * NX is required for shadow paging and for NPT if the NX huge pages
	 * mitigation is enabled.
	 */
	if (!boot_cpu_has(X86_FEATURE_NX)) {
		pr_err_ratelimited("NX (Execute Disable) not supported\n");
		return -EOPNOTSUPP;
	}
	kvm_enable_efer_bits(EFER_NX);

	iopm_pages = alloc_pages(GFP_KERNEL, order);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
				     XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (tsc_scaling) {
		if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
			tsc_scaling = false;
		} else {
			pr_info("TSC scaling supported\n");
			kvm_caps.has_tsc_control = true;
		}
	}
	kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
	kvm_caps.tsc_scaling_ratio_frac_bits = 32;

	tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);

	if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
		kvm_enable_efer_bits(EFER_AUTOIBRS);

	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
		pause_filter_thresh = 0;
	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
		pause_filter_thresh = 0;
	}

	if (nested) {
		pr_info("Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	/*
	 * KVM's MMU doesn't support using 2-level paging for itself, and thus
	 * NPT isn't supported if the host is using 2-level paging since host
	 * CR4 is unchanged on VMRUN.
	 */
	if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
		npt_enabled = false;

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	/* Force VM NPT level equal to the host's paging level */
	kvm_configure_mmu(npt_enabled, get_npt_level(),
			  get_npt_level(), PG_LEVEL_1G);
	pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	/* Setup shadow_me_value and shadow_me_mask */
	kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);

	svm_adjust_mmio_mask();

	nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);

	/*
	 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
	 * may be modified by svm_adjust_mmio_mask()), as well as nrips.
	 */
	sev_hardware_setup();

	svm_hv_hardware_setup();

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	enable_apicv = avic = avic && avic_hardware_setup();

	if (!enable_apicv) {
		svm_x86_ops.vcpu_blocking = NULL;
		svm_x86_ops.vcpu_unblocking = NULL;
		svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
	} else if (!x2avic_enabled) {
		svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
		svm_gp_erratum_intercept = false;

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
	if (vnmi)
		pr_info("Virtual NMI enabled\n");

	if (!vnmi) {
		svm_x86_ops.is_vnmi_pending = NULL;
		svm_x86_ops.set_vnmi_pending = NULL;
	}

	if (lbrv) {
		if (!boot_cpu_has(X86_FEATURE_LBRV))
			lbrv = false;
		else
			pr_info("LBR virtualization supported\n");
	}

	if (!enable_pmu)
		pr_info("PMU virtualization is disabled\n");

	svm_set_cpu_caps();

	/*
	 * It seems that on AMD processors PTE's accessed bit is
	 * being set by the CPU hardware before the NPF vmexit.
	 * This is not expected behaviour and our tests fail because
	 * of it.
	 * A workaround here is to disable support for
	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
	 * In this case userspace can know if there is support using
	 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
	 * it.
	 * If future AMD CPU models change the behaviour described above,
	 * this variable can be changed accordingly.
	 */
	allow_smaller_maxphyaddr = !npt_enabled;

	return 0;

err:
	svm_hardware_unsetup();
	return r;
}
static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.hardware_setup = svm_hardware_setup,

	.runtime_ops = &svm_x86_ops,
	.pmu_ops = &amd_pmu_ops,
};

static void __svm_exit(void)
{
	kvm_x86_vendor_exit();

	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
}

static int __init svm_init(void)
{
	int r;

	__unused_size_checks();

	if (!kvm_is_svm_supported())
		return -EOPNOTSUPP;

	r = kvm_x86_vendor_init(&svm_init_ops);
	if (r)
		return r;

	cpu_emergency_register_virt_callback(svm_emergency_disable);

	/*
	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
	 * exposed to userspace!
	 */
	r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
		     THIS_MODULE);
	if (r)
		goto err_kvm_init;

	return 0;

err_kvm_init:
	__svm_exit();
	return r;
}

static void __exit svm_exit(void)
{
	kvm_exit();
	__svm_exit();
}

module_init(svm_init)
module_exit(svm_exit)