// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "mmu/page_track.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>
#include <linux/smp.h>

#include <trace/events/ipi.h>
#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>

#include <clocksource/hyperv_timer.h>
#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
struct kvm_caps kvm_caps __read_mostly = {
        .supported_mce_cap = MCG_CTL_P | MCG_SER_P,
};
EXPORT_SYMBOL_GPL(kvm_caps);
#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))

#define emul_to_vcpu(ctxt) \
        ((struct kvm_vcpu *)(ctxt)->vcpu)
/* EFER defaults:
 * - enable syscall per default because its emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static DEFINE_MUTEX(vendor_module_lock);
struct kvm_x86_ops kvm_x86_ops __read_mostly;

#define KVM_X86_OP(func)                                             \
        DEFINE_STATIC_CALL_NULL(kvm_x86_##func,                      \
                                *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, 0644);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, 0644);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, 0644);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, 0444);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, 0644);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
 * adaptive tuning starting from default advancement of 1000ns. '0' disables
 * advancement entirely. Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, 0644);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, 0444);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, 0444);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

/*
 * Flags to manipulate forced emulation behavior (any non-zero value will
 * enable forced emulation).
 */
#define KVM_FEP_CLEAR_RFLAGS_RF BIT(1)
static int __read_mostly force_emulation_prefix;
module_param(force_emulation_prefix, int, 0644);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, 0644);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
EXPORT_SYMBOL_GPL(enable_pmu);
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);

/* Enable/disable SMT_RSB bug mitigation */
static bool __read_mostly mitigate_smt_rsb;
module_param(mitigate_smt_rsb, bool, 0444);
/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
        struct user_return_notifier urn;
        bool registered;
        struct kvm_user_return_msr_values {
                u64 host;
                u64 curr;
        } values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                            | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
                            | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
                            | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);

u64 __read_mostly host_arch_capabilities;
EXPORT_SYMBOL_GPL(host_arch_capabilities);
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
        KVM_GENERIC_VM_STATS(),
        STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
        STATS_DESC_COUNTER(VM, mmu_pte_write),
        STATS_DESC_COUNTER(VM, mmu_pde_zapped),
        STATS_DESC_COUNTER(VM, mmu_flooded),
        STATS_DESC_COUNTER(VM, mmu_recycled),
        STATS_DESC_COUNTER(VM, mmu_cache_miss),
        STATS_DESC_ICOUNTER(VM, mmu_unsync),
        STATS_DESC_ICOUNTER(VM, pages_4k),
        STATS_DESC_ICOUNTER(VM, pages_2m),
        STATS_DESC_ICOUNTER(VM, pages_1g),
        STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
        STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
        STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, pf_taken),
        STATS_DESC_COUNTER(VCPU, pf_fixed),
        STATS_DESC_COUNTER(VCPU, pf_emulate),
        STATS_DESC_COUNTER(VCPU, pf_spurious),
        STATS_DESC_COUNTER(VCPU, pf_fast),
        STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
        STATS_DESC_COUNTER(VCPU, pf_guest),
        STATS_DESC_COUNTER(VCPU, tlb_flush),
        STATS_DESC_COUNTER(VCPU, invlpg),
        STATS_DESC_COUNTER(VCPU, exits),
        STATS_DESC_COUNTER(VCPU, io_exits),
        STATS_DESC_COUNTER(VCPU, mmio_exits),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, irq_window_exits),
        STATS_DESC_COUNTER(VCPU, nmi_window_exits),
        STATS_DESC_COUNTER(VCPU, l1d_flush),
        STATS_DESC_COUNTER(VCPU, halt_exits),
        STATS_DESC_COUNTER(VCPU, request_irq_exits),
        STATS_DESC_COUNTER(VCPU, irq_exits),
        STATS_DESC_COUNTER(VCPU, host_state_reload),
        STATS_DESC_COUNTER(VCPU, fpu_reload),
        STATS_DESC_COUNTER(VCPU, insn_emulation),
        STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
        STATS_DESC_COUNTER(VCPU, hypercalls),
        STATS_DESC_COUNTER(VCPU, irq_injections),
        STATS_DESC_COUNTER(VCPU, nmi_injections),
        STATS_DESC_COUNTER(VCPU, req_event),
        STATS_DESC_COUNTER(VCPU, nested_run),
        STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
        STATS_DESC_COUNTER(VCPU, directed_yield_successful),
        STATS_DESC_COUNTER(VCPU, preemption_reported),
        STATS_DESC_COUNTER(VCPU, preemption_other),
        STATS_DESC_IBOOLEAN(VCPU, guest_mode),
        STATS_DESC_COUNTER(VCPU, notify_window_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};
u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;
/*
 * When called, it means the previous get/set msr reached an invalid msr.
 * Return true if we want to ignore/silent this failed msr access.
 */
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
        const char *op = write ? "wrmsr" : "rdmsr";

        if (ignore_msrs) {
                if (report_ignored_msrs)
                        kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
                                      op, msr, data);
                /* Mask the error */
                return true;
        } else {
                kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
                                      op, msr, data);
                return false;
        }
}
static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
        unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
        unsigned int size = sizeof(struct x86_emulate_ctxt);

        return kmem_cache_create_usercopy("x86_emulator", size,
                                          __alignof__(struct x86_emulate_ctxt),
                                          SLAB_ACCOUNT, useroffset,
                                          size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
                vcpu->arch.apf.gfns[i] = ~0;
}
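
/*
 * Unregister the user-return notifier and restore the host's values for any
 * user-return MSRs that still hold the guest's value.  Runs just before the
 * CPU returns to userspace, or from hardware-disable paths with IRQs off.
 */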
static void kvm_on_user_return(struct user_return_notifier *urn)
{
        unsigned slot;
        struct kvm_user_return_msrs *msrs
                = container_of(urn, struct kvm_user_return_msrs, urn);
        struct kvm_user_return_msr_values *values;
        unsigned long flags;

        /*
         * Disabling irqs at this point since the following code could be
         * interrupted and executed through kvm_arch_hardware_disable()
         */
        local_irq_save(flags);
        if (msrs->registered) {
                msrs->registered = false;
                user_return_notifier_unregister(urn);
        }
        local_irq_restore(flags);
        for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
                values = &msrs->values[slot];
                if (values->host != values->curr) {
                        wrmsrl(kvm_uret_msrs_list[slot], values->host);
                        values->curr = values->host;
                }
        }
}
static int kvm_probe_user_return_msr(u32 msr)
{
        u64 val;
        int ret;

        preempt_disable();
        ret = rdmsrl_safe(msr, &val);
        if (ret)
                goto out;
        ret = wrmsrl_safe(msr, val);
out:
        preempt_enable();
        return ret;
}
int kvm_add_user_return_msr(u32 msr)
{
        BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

        if (kvm_probe_user_return_msr(msr))
                return -1;

        kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
        return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
        int i;

        for (i = 0; i < kvm_nr_uret_msrs; ++i) {
                if (kvm_uret_msrs_list[i] == msr)
                        return i;
        }
        return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
static void kvm_user_return_msr_cpu_online(void)
{
        unsigned int cpu = smp_processor_id();
        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
        u64 value;
        int i;

        for (i = 0; i < kvm_nr_uret_msrs; ++i) {
                rdmsrl_safe(kvm_uret_msrs_list[i], &value);
                msrs->values[i].host = value;
                msrs->values[i].curr = value;
        }
}
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
        unsigned int cpu = smp_processor_id();
        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
        int err;

        value = (value & mask) | (msrs->values[slot].host & ~mask);
        if (value == msrs->values[slot].curr)
                return 0;
        err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
        if (err)
                return 1;

        msrs->values[slot].curr = value;
        if (!msrs->registered) {
                msrs->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&msrs->urn);
                msrs->registered = true;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
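
/*
 * Force-restore the host's user-return MSRs on this CPU, e.g. when hardware
 * virtualization is being disabled, instead of waiting for a return to
 * userspace.
 */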
static void drop_user_return_notifiers(void)
{
        unsigned int cpu = smp_processor_id();
        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

        if (msrs->registered)
                kvm_on_user_return(&msrs->urn);
}
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.apic_base;
}

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
        return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
        enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
        u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
                (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

        if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
                return 1;
        if (!msr_info->host_initiated) {
                if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
                        return 1;
                if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
                        return 1;
        }

        kvm_lapic_set_base(vcpu, msr_info->data);
        kvm_recalculate_apic_map(vcpu->kvm);
        return 0;
}
/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Usually after catching the
 * fault we just panic; during reboot instead the instruction is ignored.
 */
noinstr void kvm_spurious_fault(void)
{
        /* Fault while not rebooting.  We want the trace. */
        BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
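
/*
 * Exception classification used to decide whether a second exception raised
 * while delivering a first one is handled serially, merged into a double
 * fault (#DF), or escalated to a triple fault; see kvm_multiple_exception()
 * and the SDM's "Conditions for Generating a Double Fault".
 */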
#define EXCPT_BENIGN            0
#define EXCPT_CONTRIBUTORY      1
#define EXCPT_PF                2

static int exception_class(int vector)
{
        switch (vector) {
        case PF_VECTOR:
                return EXCPT_PF;
        case DE_VECTOR:
        case TS_VECTOR:
        case NP_VECTOR:
        case SS_VECTOR:
        case GP_VECTOR:
                return EXCPT_CONTRIBUTORY;
        default:
                break;
        }

        return EXCPT_BENIGN;
}

#define EXCPT_FAULT             0
#define EXCPT_TRAP              1
#define EXCPT_ABORT             2
#define EXCPT_INTERRUPT         3
#define EXCPT_DB                4

static int exception_type(int vector)
{
        unsigned int mask;

        if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
                return EXCPT_INTERRUPT;

        mask = 1 << vector;

        /*
         * #DBs can be trap-like or fault-like, the caller must check other CPU
         * state, e.g. DR6, to determine whether a #DB is a trap or fault.
         */
        if (mask & (1 << DB_VECTOR))
                return EXCPT_DB;

        if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
                return EXCPT_TRAP;

        if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
                return EXCPT_ABORT;

        /* Reserved exceptions will result in fault */
        return EXCPT_FAULT;
}
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
                                   struct kvm_queued_exception *ex)
{
        if (!ex->has_payload)
                return;

        switch (ex->vector) {
        case DB_VECTOR:
                /*
                 * "Certain debug exceptions may clear bit 0-3.  The
                 * remaining contents of the DR6 register are never
                 * cleared by the processor".
                 */
                vcpu->arch.dr6 &= ~DR_TRAP_BITS;
                /*
                 * In order to reflect the #DB exception payload in guest
                 * dr6, three components need to be considered: active low
                 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
                 * DR6_BS, DR6_BT).
                 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
                 * In the target guest dr6:
                 * FIXED_1 bits should always be set.
                 * Active low bits should be cleared if 1-setting in payload.
                 * Active high bits should be set if 1-setting in payload.
                 *
                 * Note, the payload is compatible with the pending debug
                 * exceptions/exit qualification under VMX, that active_low bits
                 * are active high in payload.
                 * So they need to be flipped for DR6.
                 */
                vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
                vcpu->arch.dr6 |= ex->payload;
                vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;

                /*
                 * The #DB payload is defined as compatible with the 'pending
                 * debug exceptions' field under VMX, not DR6. While bit 12 is
                 * defined in the 'pending debug exceptions' field (enabled
                 * breakpoint), it is reserved and must be zero in DR6.
                 */
                vcpu->arch.dr6 &= ~BIT(12);
                break;
        case PF_VECTOR:
                vcpu->arch.cr2 = ex->payload;
                break;
        }

        ex->has_payload = false;
        ex->payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
                                       bool has_error_code, u32 error_code,
                                       bool has_payload, unsigned long payload)
{
        struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;

        ex->vector = vector;
        ex->injected = false;
        ex->pending = true;
        ex->has_error_code = has_error_code;
        ex->error_code = error_code;
        ex->has_payload = has_payload;
        ex->payload = payload;
}

/* Forcibly leave the nested mode in cases like a vCPU reset */
static void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops.nested_ops->leave_nested(vcpu);
}
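
/*
 * Common worker for queueing and reinjecting exceptions: applies the
 * benign/contributory/page-fault merging rules, synthesizing #DF or
 * requesting a triple-fault shutdown when two exceptions collide.
 */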
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
                unsigned nr, bool has_error, u32 error_code,
                bool has_payload, unsigned long payload, bool reinject)
{
        u32 prev_nr;
        int class1, class2;

        kvm_make_request(KVM_REQ_EVENT, vcpu);

        /*
         * If the exception is destined for L2 and isn't being reinjected,
         * morph it to a VM-Exit if L1 wants to intercept the exception.  A
         * previously injected exception is not checked because it was checked
         * when it was original queued, and re-checking is incorrect if _L1_
         * injected the exception, in which case it's exempt from interception.
         */
        if (!reinject && is_guest_mode(vcpu) &&
            kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
                kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
                                           has_payload, payload);
                return;
        }

        if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
        queue:
                if (reinject) {
                        /*
                         * On VM-Entry, an exception can be pending if and only
                         * if event injection was blocked by nested_run_pending.
                         * In that case, however, vcpu_enter_guest() requests an
                         * immediate exit, and the guest shouldn't proceed far
                         * enough to need reinjection.
                         */
                        WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
                        vcpu->arch.exception.injected = true;
                        if (WARN_ON_ONCE(has_payload)) {
                                /*
                                 * A reinjected event has already
                                 * delivered its payload.
                                 */
                                has_payload = false;
                                payload = 0;
                        }
                } else {
                        vcpu->arch.exception.pending = true;
                        vcpu->arch.exception.injected = false;
                }
                vcpu->arch.exception.has_error_code = has_error;
                vcpu->arch.exception.vector = nr;
                vcpu->arch.exception.error_code = error_code;
                vcpu->arch.exception.has_payload = has_payload;
                vcpu->arch.exception.payload = payload;
                if (!is_guest_mode(vcpu))
                        kvm_deliver_exception_payload(vcpu,
                                                      &vcpu->arch.exception);
                return;
        }

        /* to check exception */
        prev_nr = vcpu->arch.exception.vector;
        if (prev_nr == DF_VECTOR) {
                /* triple fault -> shutdown */
                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                return;
        }
        class1 = exception_class(prev_nr);
        class2 = exception_class(nr);
        if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
            (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
                /*
                 * Synthesize #DF.  Clear the previously injected or pending
                 * exception so as not to incorrectly trigger shutdown.
                 */
                vcpu->arch.exception.injected = false;
                vcpu->arch.exception.pending = false;

                kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
        } else {
                /* replace previous exception with a new one in a hope
                   that instruction re-execution will regenerate lost
                   exception */
                goto queue;
        }
}
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
                           unsigned long payload)
{
        kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
                                    u32 error_code, unsigned long payload)
{
        kvm_multiple_exception(vcpu, nr, true, error_code,
                               true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
        if (err)
                kvm_inject_gp(vcpu, 0);
        else
                return kvm_skip_emulated_instruction(vcpu);

        return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
        if (err) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
                                       EMULTYPE_COMPLETE_USER_EXIT);
}
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
        ++vcpu->stat.pf_guest;

        /*
         * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
         * whether or not L1 wants to intercept "regular" #PF.
         */
        if (is_guest_mode(vcpu) && fault->async_page_fault)
                kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
                                           true, fault->error_code,
                                           true, fault->address);
        else
                kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
                                        fault->address);
}
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
                                    struct x86_exception *fault)
{
        struct kvm_mmu *fault_mmu;
        WARN_ON_ONCE(fault->vector != PF_VECTOR);

        fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
                                               vcpu->arch.walk_mmu;

        /*
         * Invalidate the TLB entry for the faulting address, if it exists,
         * else the access will fault indefinitely (and to emulate hardware).
         */
        if ((fault->error_code & PFERR_PRESENT_MASK) &&
            !(fault->error_code & PFERR_RSVD_MASK))
                kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
                                        KVM_MMU_ROOT_CURRENT);

        fault_mmu->inject_page_fault(vcpu, fault);
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        atomic_inc(&vcpu->arch.nmi_queued);
        kvm_make_request(KVM_REQ_NMI, vcpu);
}

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
        if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
                return true;
        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
        return false;
}

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
        if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
                return true;

        kvm_queue_exception(vcpu, UD_VECTOR);
        return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);
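
/* Reserved bits that must be zero in a PAE page-directory-pointer-table entry. */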
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}
/*
 * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        gpa_t real_gpa;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

        /*
         * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
         * to an L1 GPA.
         */
        real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
                                     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
        if (real_gpa == INVALID_GPA)
                return 0;

        /* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
                                       cr3 & GENMASK(11, 5), sizeof(pdpte));
        if (ret < 0)
                return 0;

        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & PT_PRESENT_MASK) &&
                    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
                        return 0;
                }
        }

        /*
         * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
         * Shadow page roots need to be reconstructed instead.
         */
        if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
                kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);

        memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
        kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
        kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
        vcpu->arch.pdptrs_from_userspace = false;

        return 1;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
        if (cr0 & 0xffffffff00000000UL)
                return false;
#endif

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
                return false;

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
                return false;

        return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
}
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
        /*
         * CR0.WP is incorporated into the MMU role, but only for non-nested,
         * indirect shadow MMUs.  If paging is disabled, no updates are needed
         * as there are no permission bits to emulate.  If TDP is enabled, the
         * MMU's metadata needs to be updated, e.g. so that emulating guest
         * translations does the right thing, but there's no need to unload the
         * root as CR0.WP doesn't affect SPTEs.
         */
        if ((cr0 ^ old_cr0) == X86_CR0_WP) {
                if (!(cr0 & X86_CR0_PG))
                        return;

                if (tdp_enabled) {
                        kvm_init_mmu(vcpu);
                        return;
                }
        }

        if ((cr0 ^ old_cr0) & X86_CR0_PG) {
                kvm_clear_async_pf_completion_queue(vcpu);
                kvm_async_pf_hash_reset(vcpu);

                /*
                 * Clearing CR0.PG is defined to flush the TLB from the guest's
                 * perspective.
                 */
                if (!(cr0 & X86_CR0_PG))
                        kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
        }

        if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
                kvm_mmu_reset_context(vcpu);

        if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
            kvm_mmu_honors_guest_mtrrs(vcpu->kvm) &&
            !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        unsigned long old_cr0 = kvm_read_cr0(vcpu);

        if (!kvm_is_valid_cr0(vcpu, cr0))
                return 1;

        cr0 |= X86_CR0_ET;

        /* Write to CR0 reserved bits are ignored, even on Intel. */
        cr0 &= ~CR0_RESERVED_BITS;

#ifdef CONFIG_X86_64
        if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
            (cr0 & X86_CR0_PG)) {
                int cs_db, cs_l;

                if (!is_pae(vcpu))
                        return 1;
                static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
                if (cs_l)
                        return 1;
        }
#endif
        if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
            is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
            !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
                return 1;

        if (!(cr0 & X86_CR0_PG) &&
            (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
                return 1;

        static_call(kvm_x86_set_cr0)(vcpu, cr0);

        kvm_post_set_cr0(vcpu, old_cr0, cr0);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.guest_state_protected)
                return;

        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {

                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

                if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
                    vcpu->arch.ia32_xss != host_xss)
                        wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
        }

        if (cpu_feature_enabled(X86_FEATURE_PKU) &&
            vcpu->arch.pkru != vcpu->arch.host_pkru &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
             kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
                write_pkru(vcpu->arch.pkru);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.guest_state_protected)
                return;

        if (cpu_feature_enabled(X86_FEATURE_PKU) &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
             kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
                vcpu->arch.pkru = rdpkru();
                if (vcpu->arch.pkru != vcpu->arch.host_pkru)
                        write_pkru(vcpu->arch.host_pkru);
        }

        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {

                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

                if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
                    vcpu->arch.ia32_xss != host_xss)
                        wrmsrl(MSR_IA32_XSS, host_xss);
        }
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
}
#endif
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
        u64 xcr0 = xcr;
        u64 old_xcr0 = vcpu->arch.xcr0;
        u64 valid_bits;

        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
        if (!(xcr0 & XFEATURE_MASK_FP))
                return 1;
        if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
                return 1;

        /*
         * Do not allow the guest to set bits that we do not support
         * saving.  However, xcr0 bit 0 is always set, even if the
         * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
         */
        valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
        if (xcr0 & ~valid_bits)
                return 1;

        if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
            (!(xcr0 & XFEATURE_MASK_BNDCSR)))
                return 1;

        if (xcr0 & XFEATURE_MASK_AVX512) {
                if (!(xcr0 & XFEATURE_MASK_YMM))
                        return 1;
                if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return 1;
        }

        if ((xcr0 & XFEATURE_MASK_XTILE) &&
            ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
                return 1;

        vcpu->arch.xcr0 = xcr0;

        if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
                kvm_update_cpuid_runtime(vcpu);
        return 0;
}
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
        /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
        if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
            __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & cr4_reserved_bits)
                return false;

        if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
                return false;

        return true;
}
EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);

static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        return __kvm_is_valid_cr4(vcpu, cr4) &&
               static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
        if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
                kvm_mmu_reset_context(vcpu);

        /*
         * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
         * according to the SDM; however, stale prev_roots could be reused
         * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
         * free them all.  This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
         * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed.
         */
        if (!tdp_enabled &&
            (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
                kvm_mmu_unload(vcpu);

        /*
         * The TLB has to be flushed for all PCIDs if any of the following
         * (architecturally required) changes happen:
         * - CR4.PCIDE is changed from 1 to 0
         * - CR4.PGE is toggled
         *
         * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
         */
        if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
            (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
                kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);

        /*
         * The TLB has to be flushed for the current PCID if any of the
         * following (architecturally required) changes happen:
         * - CR4.SMEP is changed from 0 to 1
         * - CR4.PAE is toggled
         */
        else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
                 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = kvm_read_cr4(vcpu);

        if (!kvm_is_valid_cr4(vcpu, cr4))
                return 1;

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
                if ((cr4 ^ old_cr4) & X86_CR4_LA57)
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
                   && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
                return 1;

        if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
                /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
                if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
                        return 1;
        }

        static_call(kvm_x86_set_cr4)(vcpu, cr4);

        kvm_post_set_cr4(vcpu, old_cr4, cr4);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
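
/*
 * Invalidate guest TLB entries and cached roots associated with the given
 * PCID, emulating the architectural effects of INVPCID / MOV to CR3 on the
 * guest's translations (KVM's own TDP page tables are unaffected).
 */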
static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        unsigned long roots_to_free = 0;
        int i;

        /*
         * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
         * this is reachable when running EPT=1 and unrestricted_guest=0, and
         * also via the emulator.  KVM's TDP page tables are not in the scope of
         * the invalidation, but the guest's TLB entries need to be flushed as
         * the CPU may have cached entries in its TLB for the target PCID.
         */
        if (unlikely(tdp_enabled)) {
                kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
                return;
        }

        /*
         * If neither the current CR3 nor any of the prev_roots use the given
         * PCID, then nothing needs to be done here because a resync will
         * happen anyway before switching to any other CR3.
         */
        if (kvm_get_active_pcid(vcpu) == pcid) {
                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        }

        /*
         * If PCID is disabled, there is no need to free prev_roots even if the
         * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
         * with PCIDE=0.
         */
        if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
                return;

        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
                        roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

        kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
}
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        bool skip_tlb_flush = false;
        unsigned long pcid = 0;
#ifdef CONFIG_X86_64
        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
                skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
                cr3 &= ~X86_CR3_PCID_NOFLUSH;
                pcid = cr3 & X86_CR3_PCID_MASK;
        }
#endif

        /* PDPTRs are always reloaded for PAE paging. */
        if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
                goto handle_tlb_flush;

        /*
         * Do not condition the GPA check on long mode, this helper is used to
         * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
         * the current vCPU mode is accurate.
         */
        if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
                return 1;

        if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
                return 1;

        if (cr3 != kvm_read_cr3(vcpu))
                kvm_mmu_new_pgd(vcpu, cr3);

        vcpu->arch.cr3 = cr3;
        kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
        /* Do not call post_set_cr3, we do not get here for confidential guests.  */

handle_tlb_flush:
        /*
         * A load of CR3 that flushes the TLB flushes only the current PCID,
         * even if PCID is disabled, in which case PCID=0 is flushed.  It's a
         * moot point in the end because _disabling_ PCID will flush all PCIDs,
         * and it's impossible to use a non-zero PCID when PCID is disabled,
         * i.e. only PCID=0 can be relevant.
         */
        if (!skip_tlb_flush)
                kvm_invalidate_pcid(vcpu, pcid);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
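
/*
 * CR8 mirrors the local APIC's TPR; with an in-kernel APIC the value is
 * forwarded to the lapic, otherwise it is tracked in vcpu->arch.cr8.
 */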
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS)
                return 1;
        if (lapic_in_kernel(vcpu))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
        int i;

        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                for (i = 0; i < KVM_NR_DB_REGS; i++)
                        vcpu->arch.eff_db[i] = vcpu->arch.db[i];
        }
}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
        unsigned long dr7;

        if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                dr7 = vcpu->arch.guest_debug_dr7;
        else
                dr7 = vcpu->arch.dr7;
        static_call(kvm_x86_set_dr7)(vcpu, dr7);
        vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
        if (dr7 & DR7_BP_EN_MASK)
                vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_update_dr7);
static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
        u64 fixed = DR6_FIXED_1;

        if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
                fixed |= DR6_RTM;

        if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
                fixed |= DR6_BUS_LOCK;
        return fixed;
}
*vcpu
, int dr
, unsigned long val
)
1375 size_t size
= ARRAY_SIZE(vcpu
->arch
.db
);
1379 vcpu
->arch
.db
[array_index_nospec(dr
, size
)] = val
;
1380 if (!(vcpu
->guest_debug
& KVM_GUESTDBG_USE_HW_BP
))
1381 vcpu
->arch
.eff_db
[dr
] = val
;
1385 if (!kvm_dr6_valid(val
))
1387 vcpu
->arch
.dr6
= (val
& DR6_VOLATILE
) | kvm_dr6_fixed(vcpu
);
1391 if (!kvm_dr7_valid(val
))
1393 vcpu
->arch
.dr7
= (val
& DR7_VOLATILE
) | DR7_FIXED_1
;
1394 kvm_update_dr7(vcpu
);
1400 EXPORT_SYMBOL_GPL(kvm_set_dr
);
void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
        size_t size = ARRAY_SIZE(vcpu->arch.db);

        switch (dr) {
        case 0 ... 3:
                *val = vcpu->arch.db[array_index_nospec(dr, size)];
                break;
        case 4:
        case 6:
                *val = vcpu->arch.dr6;
                break;
        case 5:
        default: /* 7 */
                *val = vcpu->arch.dr7;
                break;
        }
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
        u32 ecx = kvm_rcx_read(vcpu);
        u64 data;

        if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        kvm_rax_write(vcpu, (u32)data);
        kvm_rdx_write(vcpu, data >> 32);
        return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
/*
 * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track
 * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.  msrs_to_save holds MSRs that
 * require host support, i.e. should be probed via RDMSR.  emulated_msrs holds
 * MSRs that KVM emulates without strictly requiring host support.
 * msr_based_features holds MSRs that enumerate features, i.e. are effectively
 * CPUID leafs.  Note, msr_based_features isn't mutually exclusive with
 * msrs_to_save and emulated_msrs.
 */

static const u32 msrs_to_save_base[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
        MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
        MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
        MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
        MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
        MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
        MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
        MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
        MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
        MSR_IA32_UMWAIT_CONTROL,

        MSR_IA32_XFD, MSR_IA32_XFD_ERR,
};

static const u32 msrs_to_save_pmu[] = {
        MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
        MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
        MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
        MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
        MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,

        /* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
        MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
        MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
        MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
        MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
        MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
        MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
        MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
        MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,

        MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
        MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,

        /* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */
        MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
        MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
        MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
        MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,

        MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
        MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
        MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
                        ARRAY_SIZE(msrs_to_save_pmu)];
static unsigned num_msrs_to_save;
static const u32 emulated_msrs_all[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
        HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
        HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
        HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
        HV_X64_MSR_VP_INDEX,
        HV_X64_MSR_VP_RUNTIME,
        HV_X64_MSR_SCONTROL,
        HV_X64_MSR_STIMER0_CONFIG,
        HV_X64_MSR_VP_ASSIST_PAGE,
        HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
        HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
        HV_X64_MSR_SYNDBG_OPTIONS,
        HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
        HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
        HV_X64_MSR_SYNDBG_PENDING_BUFFER,

        MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
        MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,

        MSR_IA32_TSC_ADJUST,
        MSR_IA32_TSC_DEADLINE,
        MSR_IA32_ARCH_CAPABILITIES,
        MSR_IA32_PERF_CAPABILITIES,
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
        MSR_IA32_MCG_EXT_CTL,
        MSR_MISC_FEATURES_ENABLES,
        MSR_AMD64_VIRT_SPEC_CTRL,
        MSR_AMD64_TSC_RATIO,

        /*
         * KVM always supports the "true" VMX control MSRs, even if the host
         * does not.  The VMX MSRs as a whole are considered "emulated" as KVM
         * doesn't strictly require them to exist in the host (ignoring that
         * KVM would refuse to load in the first place if the core set of MSRs
         * aren't supported).
         */
        MSR_IA32_VMX_TRUE_PINBASED_CTLS,
        MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
        MSR_IA32_VMX_TRUE_EXIT_CTLS,
        MSR_IA32_VMX_TRUE_ENTRY_CTLS,
        MSR_IA32_VMX_CR0_FIXED0,
        MSR_IA32_VMX_CR4_FIXED0,
        MSR_IA32_VMX_VMCS_ENUM,
        MSR_IA32_VMX_PROCBASED_CTLS2,
        MSR_IA32_VMX_EPT_VPID_CAP,
        MSR_IA32_VMX_VMFUNC,

        MSR_KVM_POLL_CONTROL,
};

static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;
/*
 * List of MSRs that control the existence of MSR-based features, i.e. MSRs
 * that are effectively CPUID leafs.  VMX MSRs are also included in the set of
 * feature MSRs, but are handled separately to allow expedited lookups.
 */
static const u32 msr_based_features_all_except_vmx[] = {
        MSR_IA32_ARCH_CAPABILITIES,
        MSR_IA32_PERF_CAPABILITIES,
};

static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
                              (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
static unsigned int num_msr_based_features;
/*
 * All feature MSRs except uCode revID, which tracks the currently loaded uCode
 * patch, are immutable once the vCPU model is defined.
 */
static bool kvm_is_immutable_feature_msr(u32 msr)
{
        int i;

        if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
                return true;

        for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
                if (msr == msr_based_features_all_except_vmx[i])
                        return msr != MSR_IA32_UCODE_REV;
        }

        return false;
}
/*
 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
 * does not yet virtualize. These include:
 *   10 - MISC_PACKAGE_CTRLS
 *   11 - ENERGY_FILTERING_CTL
 *   18 - FB_CLEAR_CTRL
 *   21 - XAPIC_DISABLE_STATUS
 *   23 - OVERCLOCKING_STATUS
 */

#define KVM_SUPPORTED_ARCH_CAP \
        (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
         ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
         ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
         ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
         ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
static u64 kvm_get_arch_capabilities(void)
{
        u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;

        /*
         * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
         * the nested hypervisor runs with NX huge pages.  If it is not,
         * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
         * L1 guests, so it need not worry about its own (L2) guests.
         */
        data |= ARCH_CAP_PSCHANGE_MC_NO;

        /*
         * If we're doing cache flushes (either "always" or "cond")
         * we will do one whenever the guest does a vmlaunch/vmresume.
         * If an outer hypervisor is doing the cache flush for us
         * (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
         * capability to the guest too, and if EPT is disabled we're not
         * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
         * require a nested hypervisor to do a flush of its own.
         */
        if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
                data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                data |= ARCH_CAP_RDCL_NO;
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                data |= ARCH_CAP_SSB_NO;
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                data |= ARCH_CAP_MDS_NO;

        if (!boot_cpu_has(X86_FEATURE_RTM)) {
                /*
                 * If RTM=0 because the kernel has disabled TSX, the host might
                 * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
                 * and therefore knows that there cannot be TAA) but keep
                 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
                 * and we want to allow migrating those guests to tsx=off hosts.
                 */
                data &= ~ARCH_CAP_TAA_NO;
        } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
                data |= ARCH_CAP_TAA_NO;
        } else {
                /*
                 * Nothing to do here; we emulate TSX_CTRL if present on the
                 * host so the guest can choose between disabling TSX or
                 * using VERW to clear CPU buffers.
                 */
        }

        if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
                data |= ARCH_CAP_GDS_NO;

        return data;
}
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
        switch (msr->index) {
        case MSR_IA32_ARCH_CAPABILITIES:
                msr->data = kvm_get_arch_capabilities();
                break;
        case MSR_IA32_PERF_CAPABILITIES:
                msr->data = kvm_caps.supported_perf_cap;
                break;
        case MSR_IA32_UCODE_REV:
                rdmsrl_safe(msr->index, &msr->data);
                break;
        default:
                return static_call(kvm_x86_get_msr_feature)(msr);
        }
        return 0;
}
static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        struct kvm_msr_entry msr;
        int r;

        msr.index = index;
        r = kvm_get_msr_feature(&msr);

        if (r == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear the output for simplicity */
                msr.data = 0;
                if (kvm_msr_ignored_check(index, 0, false))
                        r = 0;
        }

        if (r)
                return r;

        *data = msr.data;

        return 0;
}
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & EFER_AUTOIBRS && !guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS))
                return false;

        if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
                return false;

        if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
                return false;

        if (efer & (EFER_LME | EFER_LMA) &&
            !guest_cpuid_has(vcpu, X86_FEATURE_LM))
                return false;

        if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
                return false;

        return true;
}

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & efer_reserved_bits)
                return false;

        return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);
static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        u64 old_efer = vcpu->arch.efer;
        u64 efer = msr_info->data;
        int r;

        if (efer & efer_reserved_bits)
                return 1;

        if (!msr_info->host_initiated) {
                if (!__kvm_valid_efer(vcpu, efer))
                        return 1;

                if (is_paging(vcpu) &&
                    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
                        return 1;
        }

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;

        r = static_call(kvm_x86_set_efer)(vcpu, efer);
        if (r) {
                WARN_ON(r > 0);
                return r;
        }

        if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
                kvm_mmu_reset_context(vcpu);

        return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
        struct kvm_x86_msr_filter *msr_filter;
        struct msr_bitmap_range *ranges;
        struct kvm *kvm = vcpu->kvm;
        bool allowed;
        int idx;
        u32 i;

        /* x2APIC MSRs do not support filtering. */
        if (index >= 0x800 && index <= 0x8ff)
                return true;

        idx = srcu_read_lock(&kvm->srcu);

        msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
        if (!msr_filter) {
                allowed = true;
                goto out;
        }

        allowed = msr_filter->default_allow;
        ranges = msr_filter->ranges;

        for (i = 0; i < msr_filter->count; i++) {
                u32 start = ranges[i].base;
                u32 end = start + ranges[i].nmsrs;
                u32 flags = ranges[i].flags;
                unsigned long *bitmap = ranges[i].bitmap;

                if ((index >= start) && (index < end) && (flags & type)) {
                        allowed = test_bit(index - start, bitmap);
                        break;
                }
        }

out:
        srcu_read_unlock(&kvm->srcu, idx);

        return allowed;
}
EXPORT_SYMBOL_GPL(kvm_msr_allowed);
/*
 * Write @data into the MSR specified by @index.  Select MSR specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
                         bool host_initiated)
{
        struct msr_data msr;

        switch (index) {
        case MSR_FS_BASE:
        case MSR_GS_BASE:
        case MSR_KERNEL_GS_BASE:
        case MSR_CSTAR:
        case MSR_LSTAR:
                if (is_noncanonical_address(data, vcpu))
                        return 1;
                break;
        case MSR_IA32_SYSENTER_EIP:
        case MSR_IA32_SYSENTER_ESP:
                /*
                 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
                 * non-canonical address is written on Intel but not on
                 * AMD (which ignores the top 32-bits, because it does
                 * not implement 64-bit SYSENTER).
                 *
                 * 64-bit code should hence be able to write a non-canonical
                 * value on AMD.  Making the address canonical ensures that
                 * vmentry does not fail on Intel after writing a non-canonical
                 * value, and that something deterministic happens if the guest
                 * invokes 64-bit SYSENTER.
                 */
                data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
                break;
        case MSR_TSC_AUX:
                if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
                        return 1;

                if (!host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
                        return 1;

                /*
                 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
                 * incomplete and conflicting architectural behavior.  Current
                 * AMD CPUs completely ignore bits 63:32, i.e. they aren't
                 * reserved and always read as zeros.  Enforce Intel's reserved
                 * bits check if and only if the guest CPU is Intel, and clear
                 * the bits in all other cases.  This ensures cross-vendor
                 * migration will provide consistent behavior for the guest.
                 */
                if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
                        return 1;

                data = (u32)data;
                break;
        }

        msr.data = data;
        msr.index = index;
        msr.host_initiated = host_initiated;

        return static_call(kvm_x86_set_msr)(vcpu, &msr);
}
static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data, bool host_initiated)
{
        int ret = __kvm_set_msr(vcpu, index, data, host_initiated);

        if (ret == KVM_MSR_RET_INVALID)
                if (kvm_msr_ignored_check(index, data, true))
                        ret = 0;

        return ret;
}
/*
 * Read the MSR specified by @index into @data.  Select MSR specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
                  bool host_initiated)
{
        struct msr_data msr;
        int ret;

        switch (index) {
        case MSR_TSC_AUX:
                if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
                        return 1;

                if (!host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
                        return 1;
                break;
        }

        msr.index = index;
        msr.host_initiated = host_initiated;

        ret = static_call(kvm_x86_get_msr)(vcpu, &msr);
        if (!ret)
                *data = msr.data;
        return ret;
}
static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *data, bool host_initiated)
{
        int ret = __kvm_get_msr(vcpu, index, data, host_initiated);

        if (ret == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear *data for simplicity */
                *data = 0;
                if (kvm_msr_ignored_check(index, 0, false))
                        ret = 0;
        }

        return ret;
}
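
/*
 * The "with_filter" variants below honor userspace's MSR filter and return
 * KVM_MSR_RET_FILTERED for denied accesses; the plain kvm_get_msr() and
 * kvm_set_msr() helpers bypass the filter and are intended for KVM-internal
 * accesses.
 */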
static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
        if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
                return KVM_MSR_RET_FILTERED;
        return kvm_get_msr_ignored_check(vcpu, index, data, false);
}

static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
        if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
                return KVM_MSR_RET_FILTERED;
        return kvm_set_msr_ignored_check(vcpu, index, data, false);
}

int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
        return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
EXPORT_SYMBOL_GPL(kvm_get_msr);

int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
        return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
EXPORT_SYMBOL_GPL(kvm_set_msr);
static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
{
        if (!vcpu->run->msr.error) {
                kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
                kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
        }
}

static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
{
        return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
}

static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
{
        complete_userspace_rdmsr(vcpu);
        return complete_emulated_msr_access(vcpu);
}

static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
{
        return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
}

static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
{
        complete_userspace_rdmsr(vcpu);
        return complete_fast_msr_access(vcpu);
}
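
/* Map KVM's internal MSR access error codes to KVM_EXIT_X86_{RDMSR,WRMSR} exit reasons. */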
static u64 kvm_msr_reason(int r)
{
        switch (r) {
        case KVM_MSR_RET_INVALID:
                return KVM_MSR_EXIT_REASON_UNKNOWN;
        case KVM_MSR_RET_FILTERED:
                return KVM_MSR_EXIT_REASON_FILTER;
        default:
                return KVM_MSR_EXIT_REASON_INVAL;
        }
}
static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
                              u32 exit_reason, u64 data,
                              int (*completion)(struct kvm_vcpu *vcpu),
                              int r)
{
        u64 msr_reason = kvm_msr_reason(r);

        /* Check if the user wanted to know about this MSR fault */
        if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
                return 0;

        vcpu->run->exit_reason = exit_reason;
        vcpu->run->msr.error = 0;
        memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
        vcpu->run->msr.reason = msr_reason;
        vcpu->run->msr.index = index;
        vcpu->run->msr.data = data;
        vcpu->arch.complete_userspace_io = completion;

        return 1;
}
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{
        u32 ecx = kvm_rcx_read(vcpu);
        u64 data;
        int r;

        r = kvm_get_msr_with_filter(vcpu, ecx, &data);

        if (!r) {
                trace_kvm_msr_read(ecx, data);

                kvm_rax_write(vcpu, data & -1u);
                kvm_rdx_write(vcpu, (data >> 32) & -1u);
        } else {
                /* MSR read failed? See if we should ask user space */
                if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0,
                                       complete_fast_rdmsr, r))
                        return 0;
                trace_kvm_msr_read_ex(ecx);
        }

        return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data = kvm_read_edx_eax(vcpu);
	int r;

	r = kvm_set_msr_with_filter(vcpu, ecx, data);

	if (!r) {
		trace_kvm_msr_write(ecx, data);
	} else {
		/* MSR write failed? See if we should ask user space */
		if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
				       complete_fast_msr_access, r))
			return 0;
		/* Signal all other negative errors to userspace */
		if (r < 0)
			return r;
		trace_kvm_msr_write_ex(ecx, data);
	}

	return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
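/*
 * Summary of the emulated RDMSR/WRMSR flow above: a filtered or unrecognized
 * access is first offered to userspace via kvm_msr_user_space().  If
 * userspace opted in (KVM_CAP_X86_USER_SPACE_MSR), the vCPU exits with
 * KVM_EXIT_X86_RDMSR/WRMSR and the access is finished later by the
 * complete_fast_*() callbacks; otherwise the vendor's
 * .complete_emulated_msr() hook either skips the instruction (success) or
 * injects #GP (failure).
 */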
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
{
	return kvm_skip_emulated_instruction(vcpu);
}

int kvm_emulate_invd(struct kvm_vcpu *vcpu)
{
	/* Treat an INVD instruction as a NOP and just skip it. */
	return kvm_emulate_as_nop(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_invd);

int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);


static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
{
	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT))
		return kvm_handle_invalid_op(vcpu);

	pr_warn_once("%s instruction emulated as NOP!\n", insn);
	return kvm_emulate_as_nop(vcpu);
}

int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
}
EXPORT_SYMBOL_GPL(kvm_emulate_mwait);

int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
}
EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
	xfer_to_guest_mode_prepare();
	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
		xfer_to_guest_mode_work_pending();
}
/*
 * The fast path for frequent and performance sensitive wrmsr emulation,
 * i.e. the sending of IPIs.  Handling the IPI early in the VM-Exit flow
 * reduces the latency of virtual IPIs by avoiding the expensive bits of
 * transitioning from guest to host, e.g. reacquiring KVM's SRCU lock.  In
 * contrast to the other exit handlers, this is called with host interrupts
 * still disabled.
 */
static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
{
	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
		return 1;

	if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
	    ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
	    ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
	    ((u32)(data >> 32) != X2APIC_BROADCAST))
		return kvm_x2apic_icr_write(vcpu->arch.apic, data);

	return 1;
}

static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
{
	if (!kvm_can_use_hv_timer(vcpu))
		return 1;

	kvm_set_lapic_tscdeadline_msr(vcpu, data);
	return 0;
}
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
	u32 msr = kvm_rcx_read(vcpu);
	u64 data;
	fastpath_t ret = EXIT_FASTPATH_NONE;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (msr) {
	case APIC_BASE_MSR + (APIC_ICR >> 4):
		data = kvm_read_edx_eax(vcpu);
		if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
			kvm_skip_emulated_instruction(vcpu);
			ret = EXIT_FASTPATH_EXIT_HANDLED;
		}
		break;
	case MSR_IA32_TSC_DEADLINE:
		data = kvm_read_edx_eax(vcpu);
		if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
			kvm_skip_emulated_instruction(vcpu);
			ret = EXIT_FASTPATH_REENTER_GUEST;
		}
		break;
	default:
		break;
	}

	if (ret != EXIT_FASTPATH_NONE)
		trace_kvm_msr_write(msr, data);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}
EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
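/*
 * Only two WRMSR targets are eligible for the irqs-off fastpath: x2APIC ICR
 * writes (fixed delivery mode, physical destination, no shorthand, not a
 * broadcast) and TSC-deadline writes when the hypervisor timer (e.g. the VMX
 * preemption timer) can be used.  Everything else leaves ret as
 * EXIT_FASTPATH_NONE and goes through the normal, interrupts-enabled exit
 * handling.
 */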
/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_get_msr_ignored_check(vcpu, index, data, true);
}

static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	u64 val;

	/*
	 * Disallow writes to immutable feature MSRs after KVM_RUN.  KVM does
	 * not support modifying the guest vCPU model on the fly, e.g. changing
	 * the nVMX capabilities while L2 is running is nonsensical.  Ignore
	 * writes of the same value, e.g. to allow userspace to blindly stuff
	 * all MSRs when emulating RESET.
	 */
	if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) {
		if (do_get_msr(vcpu, index, &val) || *data != val)
			return -EINVAL;

		return 0;
	}

	return kvm_set_msr_ignored_check(vcpu, index, *data, true);
}
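/*
 * do_get_msr()/do_set_msr() back the KVM_GET_MSRS/KVM_SET_MSRS ioctls (via
 * msr_io()), which is why they pass host_initiated=true: userspace-initiated
 * accesses skip the guest CPUID checks that guest RDMSR/WRMSR emulation
 * enforces.
 */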
2245 #ifdef CONFIG_X86_64
2246 struct pvclock_clock
{
2256 struct pvclock_gtod_data
{
2259 struct pvclock_clock clock
; /* extract of a clocksource struct */
2260 struct pvclock_clock raw_clock
; /* extract of a clocksource struct */
2266 static struct pvclock_gtod_data pvclock_gtod_data
;
2268 static void update_pvclock_gtod(struct timekeeper
*tk
)
2270 struct pvclock_gtod_data
*vdata
= &pvclock_gtod_data
;
2272 write_seqcount_begin(&vdata
->seq
);
2274 /* copy pvclock gtod data */
2275 vdata
->clock
.vclock_mode
= tk
->tkr_mono
.clock
->vdso_clock_mode
;
2276 vdata
->clock
.cycle_last
= tk
->tkr_mono
.cycle_last
;
2277 vdata
->clock
.mask
= tk
->tkr_mono
.mask
;
2278 vdata
->clock
.mult
= tk
->tkr_mono
.mult
;
2279 vdata
->clock
.shift
= tk
->tkr_mono
.shift
;
2280 vdata
->clock
.base_cycles
= tk
->tkr_mono
.xtime_nsec
;
2281 vdata
->clock
.offset
= tk
->tkr_mono
.base
;
2283 vdata
->raw_clock
.vclock_mode
= tk
->tkr_raw
.clock
->vdso_clock_mode
;
2284 vdata
->raw_clock
.cycle_last
= tk
->tkr_raw
.cycle_last
;
2285 vdata
->raw_clock
.mask
= tk
->tkr_raw
.mask
;
2286 vdata
->raw_clock
.mult
= tk
->tkr_raw
.mult
;
2287 vdata
->raw_clock
.shift
= tk
->tkr_raw
.shift
;
2288 vdata
->raw_clock
.base_cycles
= tk
->tkr_raw
.xtime_nsec
;
2289 vdata
->raw_clock
.offset
= tk
->tkr_raw
.base
;
2291 vdata
->wall_time_sec
= tk
->xtime_sec
;
2293 vdata
->offs_boot
= tk
->offs_boot
;
2295 write_seqcount_end(&vdata
->seq
);
static s64 get_kvmclock_base_ns(void)
{
	/* Count up from boot time, but with the frequency of the raw clock.  */
	return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
}
#else
static s64 get_kvmclock_base_ns(void)
{
	/* Master clock not used, so we can just use CLOCK_BOOTTIME.  */
	return ktime_get_boottime_ns();
}
#endif
2311 static void kvm_write_wall_clock(struct kvm
*kvm
, gpa_t wall_clock
, int sec_hi_ofs
)
2315 struct pvclock_wall_clock wc
;
2322 r
= kvm_read_guest(kvm
, wall_clock
, &version
, sizeof(version
));
2327 ++version
; /* first time write, random junk */
2331 if (kvm_write_guest(kvm
, wall_clock
, &version
, sizeof(version
)))
2334 wall_nsec
= kvm_get_wall_clock_epoch(kvm
);
2336 wc
.nsec
= do_div(wall_nsec
, NSEC_PER_SEC
);
2337 wc
.sec
= (u32
)wall_nsec
; /* overflow in 2106 guest time */
2338 wc
.version
= version
;
2340 kvm_write_guest(kvm
, wall_clock
, &wc
, sizeof(wc
));
2343 wc_sec_hi
= wall_nsec
>> 32;
2344 kvm_write_guest(kvm
, wall_clock
+ sec_hi_ofs
,
2345 &wc_sec_hi
, sizeof(wc_sec_hi
));
2349 kvm_write_guest(kvm
, wall_clock
, &version
, sizeof(version
));
2352 static void kvm_write_system_time(struct kvm_vcpu
*vcpu
, gpa_t system_time
,
2353 bool old_msr
, bool host_initiated
)
2355 struct kvm_arch
*ka
= &vcpu
->kvm
->arch
;
2357 if (vcpu
->vcpu_id
== 0 && !host_initiated
) {
2358 if (ka
->boot_vcpu_runs_old_kvmclock
!= old_msr
)
2359 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE
, vcpu
);
2361 ka
->boot_vcpu_runs_old_kvmclock
= old_msr
;
2364 vcpu
->arch
.time
= system_time
;
2365 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE
, vcpu
);
2367 /* we verify if the enable bit is set... */
2368 if (system_time
& 1)
2369 kvm_gpc_activate(&vcpu
->arch
.pv_time
, system_time
& ~1ULL,
2370 sizeof(struct pvclock_vcpu_time_info
));
2372 kvm_gpc_deactivate(&vcpu
->arch
.pv_time
);
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	do_shl32_div32(dividend, divisor);
	return dividend;
}
2383 static void kvm_get_time_scale(uint64_t scaled_hz
, uint64_t base_hz
,
2384 s8
*pshift
, u32
*pmultiplier
)
2392 scaled64
= scaled_hz
;
2393 while (tps64
> scaled64
*2 || tps64
& 0xffffffff00000000ULL
) {
2398 tps32
= (uint32_t)tps64
;
2399 while (tps32
<= scaled64
|| scaled64
& 0xffffffff00000000ULL
) {
2400 if (scaled64
& 0xffffffff00000000ULL
|| tps32
& 0x80000000)
2408 *pmultiplier
= div_frac(scaled64
, tps32
);
2411 #ifdef CONFIG_X86_64
2412 static atomic_t kvm_guest_has_master_clock
= ATOMIC_INIT(0);
2415 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz
);
2416 static unsigned long max_tsc_khz
;
2418 static u32
adjust_tsc_khz(u32 khz
, s32 ppm
)
2420 u64 v
= (u64
)khz
* (1000000 + ppm
);
2425 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu
*vcpu
, u64 l1_multiplier
);
2427 static int set_tsc_khz(struct kvm_vcpu
*vcpu
, u32 user_tsc_khz
, bool scale
)
2431 /* Guest TSC same frequency as host TSC? */
2433 kvm_vcpu_write_tsc_multiplier(vcpu
, kvm_caps
.default_tsc_scaling_ratio
);
2437 /* TSC scaling supported? */
2438 if (!kvm_caps
.has_tsc_control
) {
2439 if (user_tsc_khz
> tsc_khz
) {
2440 vcpu
->arch
.tsc_catchup
= 1;
2441 vcpu
->arch
.tsc_always_catchup
= 1;
2444 pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
2449 /* TSC scaling required - calculate ratio */
2450 ratio
= mul_u64_u32_div(1ULL << kvm_caps
.tsc_scaling_ratio_frac_bits
,
2451 user_tsc_khz
, tsc_khz
);
2453 if (ratio
== 0 || ratio
>= kvm_caps
.max_tsc_scaling_ratio
) {
2454 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2459 kvm_vcpu_write_tsc_multiplier(vcpu
, ratio
);
2463 static int kvm_set_tsc_khz(struct kvm_vcpu
*vcpu
, u32 user_tsc_khz
)
2465 u32 thresh_lo
, thresh_hi
;
2466 int use_scaling
= 0;
2468 /* tsc_khz can be zero if TSC calibration fails */
2469 if (user_tsc_khz
== 0) {
2470 /* set tsc_scaling_ratio to a safe value */
2471 kvm_vcpu_write_tsc_multiplier(vcpu
, kvm_caps
.default_tsc_scaling_ratio
);
2475 /* Compute a scale to convert nanoseconds in TSC cycles */
2476 kvm_get_time_scale(user_tsc_khz
* 1000LL, NSEC_PER_SEC
,
2477 &vcpu
->arch
.virtual_tsc_shift
,
2478 &vcpu
->arch
.virtual_tsc_mult
);
2479 vcpu
->arch
.virtual_tsc_khz
= user_tsc_khz
;
2482 * Compute the variation in TSC rate which is acceptable
2483 * within the range of tolerance and decide if the
2484 * rate being applied is within that bounds of the hardware
2485 * rate. If so, no scaling or compensation need be done.
2487 thresh_lo
= adjust_tsc_khz(tsc_khz
, -tsc_tolerance_ppm
);
2488 thresh_hi
= adjust_tsc_khz(tsc_khz
, tsc_tolerance_ppm
);
2489 if (user_tsc_khz
< thresh_lo
|| user_tsc_khz
> thresh_hi
) {
2490 pr_debug("requested TSC rate %u falls outside tolerance [%u,%u]\n",
2491 user_tsc_khz
, thresh_lo
, thresh_hi
);
2494 return set_tsc_khz(vcpu
, user_tsc_khz
, use_scaling
);
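/*
 * Worked example for the tolerance check above, assuming the default
 * tsc_tolerance_ppm of 250: with a host tsc_khz of 3,000,000,
 * adjust_tsc_khz() yields thresh_lo = 2,999,250 and thresh_hi = 3,000,750,
 * so any requested guest rate within +/-750 kHz of the host rate is treated
 * as "close enough" and runs without scaling or catchup.
 */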
2497 static u64
compute_guest_tsc(struct kvm_vcpu
*vcpu
, s64 kernel_ns
)
2499 u64 tsc
= pvclock_scale_delta(kernel_ns
-vcpu
->arch
.this_tsc_nsec
,
2500 vcpu
->arch
.virtual_tsc_mult
,
2501 vcpu
->arch
.virtual_tsc_shift
);
2502 tsc
+= vcpu
->arch
.this_tsc_write
;
#ifdef CONFIG_X86_64
static inline int gtod_is_based_on_tsc(int mode)
{
	return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
}
#endif
2513 static void kvm_track_tsc_matching(struct kvm_vcpu
*vcpu
)
2515 #ifdef CONFIG_X86_64
2517 struct kvm_arch
*ka
= &vcpu
->kvm
->arch
;
2518 struct pvclock_gtod_data
*gtod
= &pvclock_gtod_data
;
2520 vcpus_matched
= (ka
->nr_vcpus_matched_tsc
+ 1 ==
2521 atomic_read(&vcpu
->kvm
->online_vcpus
));
2524 * Once the masterclock is enabled, always perform request in
2525 * order to update it.
2527 * In order to enable masterclock, the host clocksource must be TSC
2528 * and the vcpus need to have matched TSCs. When that happens,
2529 * perform request to enable masterclock.
2531 if (ka
->use_master_clock
||
2532 (gtod_is_based_on_tsc(gtod
->clock
.vclock_mode
) && vcpus_matched
))
2533 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE
, vcpu
);
2535 trace_kvm_track_tsc(vcpu
->vcpu_id
, ka
->nr_vcpus_matched_tsc
,
2536 atomic_read(&vcpu
->kvm
->online_vcpus
),
2537 ka
->use_master_clock
, gtod
->clock
.vclock_mode
);
/*
 * Multiply tsc by a fixed point number represented by ratio.
 *
 * The most significant 64-N bits (mult) of ratio represent the
 * integral part of the fixed point number; the remaining N bits
 * (frac) represent the fractional part, ie. ratio represents a fixed
 * point number (mult + frac * 2^(-N)).
 *
 * N equals to kvm_caps.tsc_scaling_ratio_frac_bits.
 */
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{
	return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits);
}

u64 kvm_scale_tsc(u64 tsc, u64 ratio)
{
	u64 _tsc = tsc;

	if (ratio != kvm_caps.default_tsc_scaling_ratio)
		_tsc = __scale_tsc(ratio, tsc);

	return _tsc;
}
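/*
 * Example, assuming 48 fractional bits (the VMX value of
 * tsc_scaling_ratio_frac_bits): a ratio of 0x1_8000_0000_0000 encodes 1.5,
 * so __scale_tsc(ratio, 1000000) = (1000000 * 0x1_8000_0000_0000) >> 48
 * = 1500000, i.e. the guest TSC advances 1.5 cycles per host cycle.
 */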
static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);

	return target_tsc - tsc;
}

u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	return vcpu->arch.l1_tsc_offset +
		kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
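/*
 * In other words, L1's view of the TSC is
 *
 *   L1 TSC = host_tsc * l1_tsc_scaling_ratio + l1_tsc_offset
 *
 * (with the multiplication done in fixed point via kvm_scale_tsc()), and
 * kvm_compute_l1_tsc_offset() solves that equation for the offset that makes
 * the guest observe @target_tsc at the moment of the rdtsc() above.
 */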
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
{
	u64 nested_offset;

	if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio)
		nested_offset = l1_offset;
	else
		nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
						kvm_caps.tsc_scaling_ratio_frac_bits);

	nested_offset += l2_offset;
	return nested_offset;
}
EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);

u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
{
	if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio)
		return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
				       kvm_caps.tsc_scaling_ratio_frac_bits);

	return l1_multiplier;
}
EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
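/*
 * For nested virtualization the same formula is applied twice.  With L1
 * programming an offset/multiplier for L2, the value L2 observes is
 *
 *   L2 TSC = (L1 TSC * l2_multiplier >> frac_bits) + l2_offset
 *
 * so the combined values KVM programs into hardware are
 *   offset     = (l1_offset * l2_multiplier >> frac_bits) + l2_offset
 *   multiplier = (l1_multiplier * l2_multiplier) >> frac_bits
 * which is exactly what the two helpers above compute.
 */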
2607 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu
*vcpu
, u64 l1_offset
)
2609 trace_kvm_write_tsc_offset(vcpu
->vcpu_id
,
2610 vcpu
->arch
.l1_tsc_offset
,
2613 vcpu
->arch
.l1_tsc_offset
= l1_offset
;
2616 * If we are here because L1 chose not to trap WRMSR to TSC then
2617 * according to the spec this should set L1's TSC (as opposed to
2618 * setting L1's offset for L2).
2620 if (is_guest_mode(vcpu
))
2621 vcpu
->arch
.tsc_offset
= kvm_calc_nested_tsc_offset(
2623 static_call(kvm_x86_get_l2_tsc_offset
)(vcpu
),
2624 static_call(kvm_x86_get_l2_tsc_multiplier
)(vcpu
));
2626 vcpu
->arch
.tsc_offset
= l1_offset
;
2628 static_call(kvm_x86_write_tsc_offset
)(vcpu
);
2631 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu
*vcpu
, u64 l1_multiplier
)
2633 vcpu
->arch
.l1_tsc_scaling_ratio
= l1_multiplier
;
2635 /* Userspace is changing the multiplier while L2 is active */
2636 if (is_guest_mode(vcpu
))
2637 vcpu
->arch
.tsc_scaling_ratio
= kvm_calc_nested_tsc_multiplier(
2639 static_call(kvm_x86_get_l2_tsc_multiplier
)(vcpu
));
2641 vcpu
->arch
.tsc_scaling_ratio
= l1_multiplier
;
2643 if (kvm_caps
.has_tsc_control
)
2644 static_call(kvm_x86_write_tsc_multiplier
)(vcpu
);
static inline bool kvm_check_tsc_unstable(void)
{
#ifdef CONFIG_X86_64
	/*
	 * TSC is marked unstable when we're running on Hyper-V, but the
	 * 'TSC page' clocksource is still good.
	 */
	if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
		return false;
#endif
	return check_tsc_unstable();
}
2661 * Infers attempts to synchronize the guest's tsc from host writes. Sets the
2662 * offset for the vcpu and tracks the TSC matching generation that the vcpu
2665 static void __kvm_synchronize_tsc(struct kvm_vcpu
*vcpu
, u64 offset
, u64 tsc
,
2666 u64 ns
, bool matched
)
2668 struct kvm
*kvm
= vcpu
->kvm
;
2670 lockdep_assert_held(&kvm
->arch
.tsc_write_lock
);
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
2676 kvm
->arch
.last_tsc_nsec
= ns
;
2677 kvm
->arch
.last_tsc_write
= tsc
;
2678 kvm
->arch
.last_tsc_khz
= vcpu
->arch
.virtual_tsc_khz
;
2679 kvm
->arch
.last_tsc_offset
= offset
;
2681 vcpu
->arch
.last_guest_tsc
= tsc
;
2683 kvm_vcpu_write_tsc_offset(vcpu
, offset
);
2687 * We split periods of matched TSC writes into generations.
2688 * For each generation, we track the original measured
2689 * nanosecond time, offset, and write, so if TSCs are in
2690 * sync, we can match exact offset, and if not, we can match
2691 * exact software computation in compute_guest_tsc()
2693 * These values are tracked in kvm->arch.cur_xxx variables.
2695 kvm
->arch
.cur_tsc_generation
++;
2696 kvm
->arch
.cur_tsc_nsec
= ns
;
2697 kvm
->arch
.cur_tsc_write
= tsc
;
2698 kvm
->arch
.cur_tsc_offset
= offset
;
2699 kvm
->arch
.nr_vcpus_matched_tsc
= 0;
2700 } else if (vcpu
->arch
.this_tsc_generation
!= kvm
->arch
.cur_tsc_generation
) {
2701 kvm
->arch
.nr_vcpus_matched_tsc
++;
2704 /* Keep track of which generation this VCPU has synchronized to */
2705 vcpu
->arch
.this_tsc_generation
= kvm
->arch
.cur_tsc_generation
;
2706 vcpu
->arch
.this_tsc_nsec
= kvm
->arch
.cur_tsc_nsec
;
2707 vcpu
->arch
.this_tsc_write
= kvm
->arch
.cur_tsc_write
;
2709 kvm_track_tsc_matching(vcpu
);
2712 static void kvm_synchronize_tsc(struct kvm_vcpu
*vcpu
, u64
*user_value
)
2714 u64 data
= user_value
? *user_value
: 0;
2715 struct kvm
*kvm
= vcpu
->kvm
;
2716 u64 offset
, ns
, elapsed
;
2717 unsigned long flags
;
2718 bool matched
= false;
2719 bool synchronizing
= false;
2721 raw_spin_lock_irqsave(&kvm
->arch
.tsc_write_lock
, flags
);
2722 offset
= kvm_compute_l1_tsc_offset(vcpu
, data
);
2723 ns
= get_kvmclock_base_ns();
2724 elapsed
= ns
- kvm
->arch
.last_tsc_nsec
;
2726 if (vcpu
->arch
.virtual_tsc_khz
) {
2729 * Force synchronization when creating a vCPU, or when
2730 * userspace explicitly writes a zero value.
2732 synchronizing
= true;
2733 } else if (kvm
->arch
.user_set_tsc
) {
2734 u64 tsc_exp
= kvm
->arch
.last_tsc_write
+
2735 nsec_to_cycles(vcpu
, elapsed
);
2736 u64 tsc_hz
= vcpu
->arch
.virtual_tsc_khz
* 1000LL;
2738 * Here lies UAPI baggage: when a user-initiated TSC write has
2739 * a small delta (1 second) of virtual cycle time against the
2740 * previously set vCPU, we assume that they were intended to be
2741 * in sync and the delta was only due to the racy nature of the
2744 * This trick falls down when restoring a guest which genuinely
2745 * has been running for less time than the 1 second of imprecision
2746 * which we allow for in the legacy API. In this case, the first
2747 * value written by userspace (on any vCPU) should not be subject
2748 * to this 'correction' to make it sync up with values that only
2749 * come from the kernel's default vCPU creation. Make the 1-second
2750 * slop hack only trigger if the user_set_tsc flag is already set.
2752 synchronizing
= data
< tsc_exp
+ tsc_hz
&&
2753 data
+ tsc_hz
> tsc_exp
;
2758 kvm
->arch
.user_set_tsc
= true;
2761 * For a reliable TSC, we can match TSC offsets, and for an unstable
2762 * TSC, we add elapsed time in this computation. We could let the
2763 * compensation code attempt to catch up if we fall behind, but
2764 * it's better to try to match offsets from the beginning.
2766 if (synchronizing
&&
2767 vcpu
->arch
.virtual_tsc_khz
== kvm
->arch
.last_tsc_khz
) {
2768 if (!kvm_check_tsc_unstable()) {
2769 offset
= kvm
->arch
.cur_tsc_offset
;
2771 u64 delta
= nsec_to_cycles(vcpu
, elapsed
);
2773 offset
= kvm_compute_l1_tsc_offset(vcpu
, data
);
2778 __kvm_synchronize_tsc(vcpu
, offset
, data
, ns
, matched
);
2779 raw_spin_unlock_irqrestore(&kvm
->arch
.tsc_write_lock
, flags
);
2782 static inline void adjust_tsc_offset_guest(struct kvm_vcpu
*vcpu
,
2785 u64 tsc_offset
= vcpu
->arch
.l1_tsc_offset
;
2786 kvm_vcpu_write_tsc_offset(vcpu
, tsc_offset
+ adjustment
);
2789 static inline void adjust_tsc_offset_host(struct kvm_vcpu
*vcpu
, s64 adjustment
)
2791 if (vcpu
->arch
.l1_tsc_scaling_ratio
!= kvm_caps
.default_tsc_scaling_ratio
)
2792 WARN_ON(adjustment
< 0);
2793 adjustment
= kvm_scale_tsc((u64
) adjustment
,
2794 vcpu
->arch
.l1_tsc_scaling_ratio
);
2795 adjust_tsc_offset_guest(vcpu
, adjustment
);
#ifdef CONFIG_X86_64

static u64 read_tsc(void)
{
	u64 ret = (u64)rdtsc_ordered();
	u64 last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
2820 static inline u64
vgettsc(struct pvclock_clock
*clock
, u64
*tsc_timestamp
,
2826 switch (clock
->vclock_mode
) {
2827 case VDSO_CLOCKMODE_HVCLOCK
:
2828 if (hv_read_tsc_page_tsc(hv_get_tsc_page(),
2829 tsc_timestamp
, &tsc_pg_val
)) {
2830 /* TSC page valid */
2831 *mode
= VDSO_CLOCKMODE_HVCLOCK
;
2832 v
= (tsc_pg_val
- clock
->cycle_last
) &
2835 /* TSC page invalid */
2836 *mode
= VDSO_CLOCKMODE_NONE
;
2839 case VDSO_CLOCKMODE_TSC
:
2840 *mode
= VDSO_CLOCKMODE_TSC
;
2841 *tsc_timestamp
= read_tsc();
2842 v
= (*tsc_timestamp
- clock
->cycle_last
) &
2846 *mode
= VDSO_CLOCKMODE_NONE
;
2849 if (*mode
== VDSO_CLOCKMODE_NONE
)
2850 *tsc_timestamp
= v
= 0;
2852 return v
* clock
->mult
;
2855 static int do_monotonic_raw(s64
*t
, u64
*tsc_timestamp
)
2857 struct pvclock_gtod_data
*gtod
= &pvclock_gtod_data
;
2863 seq
= read_seqcount_begin(>od
->seq
);
2864 ns
= gtod
->raw_clock
.base_cycles
;
2865 ns
+= vgettsc(>od
->raw_clock
, tsc_timestamp
, &mode
);
2866 ns
>>= gtod
->raw_clock
.shift
;
2867 ns
+= ktime_to_ns(ktime_add(gtod
->raw_clock
.offset
, gtod
->offs_boot
));
2868 } while (unlikely(read_seqcount_retry(>od
->seq
, seq
)));
2874 static int do_realtime(struct timespec64
*ts
, u64
*tsc_timestamp
)
2876 struct pvclock_gtod_data
*gtod
= &pvclock_gtod_data
;
2882 seq
= read_seqcount_begin(>od
->seq
);
2883 ts
->tv_sec
= gtod
->wall_time_sec
;
2884 ns
= gtod
->clock
.base_cycles
;
2885 ns
+= vgettsc(>od
->clock
, tsc_timestamp
, &mode
);
2886 ns
>>= gtod
->clock
.shift
;
2887 } while (unlikely(read_seqcount_retry(>od
->seq
, seq
)));
2889 ts
->tv_sec
+= __iter_div_u64_rem(ns
, NSEC_PER_SEC
, &ns
);
2895 /* returns true if host is using TSC based clocksource */
2896 static bool kvm_get_time_and_clockread(s64
*kernel_ns
, u64
*tsc_timestamp
)
2898 /* checked again under seqlock below */
2899 if (!gtod_is_based_on_tsc(pvclock_gtod_data
.clock
.vclock_mode
))
2902 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns
,
2906 /* returns true if host is using TSC based clocksource */
2907 static bool kvm_get_walltime_and_clockread(struct timespec64
*ts
,
2910 /* checked again under seqlock below */
2911 if (!gtod_is_based_on_tsc(pvclock_gtod_data
.clock
.vclock_mode
))
2914 return gtod_is_based_on_tsc(do_realtime(ts
, tsc_timestamp
));
2920 * Assuming a stable TSC across physical CPUS, and a stable TSC
2921 * across virtual CPUs, the following condition is possible.
2922 * Each numbered line represents an event visible to both
2923 * CPUs at the next numbered event.
2925 * "timespecX" represents host monotonic time. "tscX" represents
2928 * VCPU0 on CPU0 | VCPU1 on CPU1
2930 * 1. read timespec0,tsc0
2931 * 2. | timespec1 = timespec0 + N
2933 * 3. transition to guest | transition to guest
2934 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2935 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2936 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2938 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2941 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2943 * - 0 < N - M => M < N
2945 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2946 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
2948 * when updating guest vcpus pvclock areas).
2950 * To avoid that problem, do not allow visibility of distinct
2951 * system_timestamp/tsc_timestamp values simultaneously: use a master
2952 * copy of host monotonic time values. Update that master copy
2955 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
2959 static void pvclock_update_vm_gtod_copy(struct kvm
*kvm
)
2961 #ifdef CONFIG_X86_64
2962 struct kvm_arch
*ka
= &kvm
->arch
;
2964 bool host_tsc_clocksource
, vcpus_matched
;
2966 lockdep_assert_held(&kvm
->arch
.tsc_write_lock
);
2967 vcpus_matched
= (ka
->nr_vcpus_matched_tsc
+ 1 ==
2968 atomic_read(&kvm
->online_vcpus
));
2971 * If the host uses TSC clock, then passthrough TSC as stable
2974 host_tsc_clocksource
= kvm_get_time_and_clockread(
2975 &ka
->master_kernel_ns
,
2976 &ka
->master_cycle_now
);
2978 ka
->use_master_clock
= host_tsc_clocksource
&& vcpus_matched
2979 && !ka
->backwards_tsc_observed
2980 && !ka
->boot_vcpu_runs_old_kvmclock
;
2982 if (ka
->use_master_clock
)
2983 atomic_set(&kvm_guest_has_master_clock
, 1);
2985 vclock_mode
= pvclock_gtod_data
.clock
.vclock_mode
;
2986 trace_kvm_update_master_clock(ka
->use_master_clock
, vclock_mode
,
2991 static void kvm_make_mclock_inprogress_request(struct kvm
*kvm
)
2993 kvm_make_all_cpus_request(kvm
, KVM_REQ_MCLOCK_INPROGRESS
);
2996 static void __kvm_start_pvclock_update(struct kvm
*kvm
)
2998 raw_spin_lock_irq(&kvm
->arch
.tsc_write_lock
);
2999 write_seqcount_begin(&kvm
->arch
.pvclock_sc
);
3002 static void kvm_start_pvclock_update(struct kvm
*kvm
)
3004 kvm_make_mclock_inprogress_request(kvm
);
3006 /* no guest entries from this point */
3007 __kvm_start_pvclock_update(kvm
);
3010 static void kvm_end_pvclock_update(struct kvm
*kvm
)
3012 struct kvm_arch
*ka
= &kvm
->arch
;
3013 struct kvm_vcpu
*vcpu
;
3016 write_seqcount_end(&ka
->pvclock_sc
);
3017 raw_spin_unlock_irq(&ka
->tsc_write_lock
);
3018 kvm_for_each_vcpu(i
, vcpu
, kvm
)
3019 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, vcpu
);
3021 /* guest entries allowed */
3022 kvm_for_each_vcpu(i
, vcpu
, kvm
)
3023 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS
, vcpu
);
3026 static void kvm_update_masterclock(struct kvm
*kvm
)
3028 kvm_hv_request_tsc_page_update(kvm
);
3029 kvm_start_pvclock_update(kvm
);
3030 pvclock_update_vm_gtod_copy(kvm
);
3031 kvm_end_pvclock_update(kvm
);
/*
 * Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's
 * per-CPU value (which may be zero if a CPU is going offline).  Note, tsc_khz
 * can change during boot even if the TSC is constant, as it's possible for KVM
 * to be loaded before TSC calibration completes.  Ideally, KVM would get a
 * notification when calibration completes, but practically speaking calibration
 * will complete before userspace is alive enough to create VMs.
 */
static unsigned long get_cpu_tsc_khz(void)
{
	if (static_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return tsc_khz;
	else
		return __this_cpu_read(cpu_tsc_khz);
}
3050 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */
3051 static void __get_kvmclock(struct kvm
*kvm
, struct kvm_clock_data
*data
)
3053 struct kvm_arch
*ka
= &kvm
->arch
;
3054 struct pvclock_vcpu_time_info hv_clock
;
3056 /* both __this_cpu_read() and rdtsc() should be on the same cpu */
3060 if (ka
->use_master_clock
&&
3061 (static_cpu_has(X86_FEATURE_CONSTANT_TSC
) || __this_cpu_read(cpu_tsc_khz
))) {
3062 #ifdef CONFIG_X86_64
3063 struct timespec64 ts
;
3065 if (kvm_get_walltime_and_clockread(&ts
, &data
->host_tsc
)) {
3066 data
->realtime
= ts
.tv_nsec
+ NSEC_PER_SEC
* ts
.tv_sec
;
3067 data
->flags
|= KVM_CLOCK_REALTIME
| KVM_CLOCK_HOST_TSC
;
3070 data
->host_tsc
= rdtsc();
3072 data
->flags
|= KVM_CLOCK_TSC_STABLE
;
3073 hv_clock
.tsc_timestamp
= ka
->master_cycle_now
;
3074 hv_clock
.system_time
= ka
->master_kernel_ns
+ ka
->kvmclock_offset
;
3075 kvm_get_time_scale(NSEC_PER_SEC
, get_cpu_tsc_khz() * 1000LL,
3076 &hv_clock
.tsc_shift
,
3077 &hv_clock
.tsc_to_system_mul
);
3078 data
->clock
= __pvclock_read_cycles(&hv_clock
, data
->host_tsc
);
3080 data
->clock
= get_kvmclock_base_ns() + ka
->kvmclock_offset
;
3086 static void get_kvmclock(struct kvm
*kvm
, struct kvm_clock_data
*data
)
3088 struct kvm_arch
*ka
= &kvm
->arch
;
3092 seq
= read_seqcount_begin(&ka
->pvclock_sc
);
3093 __get_kvmclock(kvm
, data
);
3094 } while (read_seqcount_retry(&ka
->pvclock_sc
, seq
));
3097 u64
get_kvmclock_ns(struct kvm
*kvm
)
3099 struct kvm_clock_data data
;
3101 get_kvmclock(kvm
, &data
);
3105 static void kvm_setup_guest_pvclock(struct kvm_vcpu
*v
,
3106 struct gfn_to_pfn_cache
*gpc
,
3107 unsigned int offset
)
3109 struct kvm_vcpu_arch
*vcpu
= &v
->arch
;
3110 struct pvclock_vcpu_time_info
*guest_hv_clock
;
3111 unsigned long flags
;
3113 read_lock_irqsave(&gpc
->lock
, flags
);
3114 while (!kvm_gpc_check(gpc
, offset
+ sizeof(*guest_hv_clock
))) {
3115 read_unlock_irqrestore(&gpc
->lock
, flags
);
3117 if (kvm_gpc_refresh(gpc
, offset
+ sizeof(*guest_hv_clock
)))
3120 read_lock_irqsave(&gpc
->lock
, flags
);
3123 guest_hv_clock
= (void *)(gpc
->khva
+ offset
);
3126 * This VCPU is paused, but it's legal for a guest to read another
3127 * VCPU's kvmclock, so we really have to follow the specification where
3128 * it says that version is odd if data is being modified, and even after
3132 guest_hv_clock
->version
= vcpu
->hv_clock
.version
= (guest_hv_clock
->version
+ 1) | 1;
3135 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
3136 vcpu
->hv_clock
.flags
|= (guest_hv_clock
->flags
& PVCLOCK_GUEST_STOPPED
);
3138 if (vcpu
->pvclock_set_guest_stopped_request
) {
3139 vcpu
->hv_clock
.flags
|= PVCLOCK_GUEST_STOPPED
;
3140 vcpu
->pvclock_set_guest_stopped_request
= false;
3143 memcpy(guest_hv_clock
, &vcpu
->hv_clock
, sizeof(*guest_hv_clock
));
3146 guest_hv_clock
->version
= ++vcpu
->hv_clock
.version
;
3148 mark_page_dirty_in_slot(v
->kvm
, gpc
->memslot
, gpc
->gpa
>> PAGE_SHIFT
);
3149 read_unlock_irqrestore(&gpc
->lock
, flags
);
3151 trace_kvm_pvclock_update(v
->vcpu_id
, &vcpu
->hv_clock
);
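/*
 * kvm_setup_guest_pvclock() follows the pvclock version protocol: the guest
 * copy's version is made odd before the structure is rewritten and bumped
 * back to an even value afterwards, so a guest reader that samples the
 * version before and after copying the data can detect (and retry across) a
 * concurrent update.
 */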
3154 static int kvm_guest_time_update(struct kvm_vcpu
*v
)
3156 unsigned long flags
, tgt_tsc_khz
;
3158 struct kvm_vcpu_arch
*vcpu
= &v
->arch
;
3159 struct kvm_arch
*ka
= &v
->kvm
->arch
;
3161 u64 tsc_timestamp
, host_tsc
;
3163 bool use_master_clock
;
3169 * If the host uses TSC clock, then passthrough TSC as stable
3173 seq
= read_seqcount_begin(&ka
->pvclock_sc
);
3174 use_master_clock
= ka
->use_master_clock
;
3175 if (use_master_clock
) {
3176 host_tsc
= ka
->master_cycle_now
;
3177 kernel_ns
= ka
->master_kernel_ns
;
3179 } while (read_seqcount_retry(&ka
->pvclock_sc
, seq
));
3181 /* Keep irq disabled to prevent changes to the clock */
3182 local_irq_save(flags
);
3183 tgt_tsc_khz
= get_cpu_tsc_khz();
3184 if (unlikely(tgt_tsc_khz
== 0)) {
3185 local_irq_restore(flags
);
3186 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, v
);
3189 if (!use_master_clock
) {
3191 kernel_ns
= get_kvmclock_base_ns();
3194 tsc_timestamp
= kvm_read_l1_tsc(v
, host_tsc
);
3197 * We may have to catch up the TSC to match elapsed wall clock
3198 * time for two reasons, even if kvmclock is used.
3199 * 1) CPU could have been running below the maximum TSC rate
3200 * 2) Broken TSC compensation resets the base at each VCPU
3201 * entry to avoid unknown leaps of TSC even when running
3202 * again on the same CPU. This may cause apparent elapsed
3203 * time to disappear, and the guest to stand still or run
3206 if (vcpu
->tsc_catchup
) {
3207 u64 tsc
= compute_guest_tsc(v
, kernel_ns
);
3208 if (tsc
> tsc_timestamp
) {
3209 adjust_tsc_offset_guest(v
, tsc
- tsc_timestamp
);
3210 tsc_timestamp
= tsc
;
3214 local_irq_restore(flags
);
3216 /* With all the info we got, fill in the values */
3218 if (kvm_caps
.has_tsc_control
)
3219 tgt_tsc_khz
= kvm_scale_tsc(tgt_tsc_khz
,
3220 v
->arch
.l1_tsc_scaling_ratio
);
3222 if (unlikely(vcpu
->hw_tsc_khz
!= tgt_tsc_khz
)) {
3223 kvm_get_time_scale(NSEC_PER_SEC
, tgt_tsc_khz
* 1000LL,
3224 &vcpu
->hv_clock
.tsc_shift
,
3225 &vcpu
->hv_clock
.tsc_to_system_mul
);
3226 vcpu
->hw_tsc_khz
= tgt_tsc_khz
;
3227 kvm_xen_update_tsc_info(v
);
3230 vcpu
->hv_clock
.tsc_timestamp
= tsc_timestamp
;
3231 vcpu
->hv_clock
.system_time
= kernel_ns
+ v
->kvm
->arch
.kvmclock_offset
;
3232 vcpu
->last_guest_tsc
= tsc_timestamp
;
3234 /* If the host uses TSC clocksource, then it is stable */
3236 if (use_master_clock
)
3237 pvclock_flags
|= PVCLOCK_TSC_STABLE_BIT
;
3239 vcpu
->hv_clock
.flags
= pvclock_flags
;
3241 if (vcpu
->pv_time
.active
)
3242 kvm_setup_guest_pvclock(v
, &vcpu
->pv_time
, 0);
3243 #ifdef CONFIG_KVM_XEN
3244 if (vcpu
->xen
.vcpu_info_cache
.active
)
3245 kvm_setup_guest_pvclock(v
, &vcpu
->xen
.vcpu_info_cache
,
3246 offsetof(struct compat_vcpu_info
, time
));
3247 if (vcpu
->xen
.vcpu_time_info_cache
.active
)
3248 kvm_setup_guest_pvclock(v
, &vcpu
->xen
.vcpu_time_info_cache
, 0);
3250 kvm_hv_setup_tsc_page(v
->kvm
, &vcpu
->hv_clock
);
3255 * The pvclock_wall_clock ABI tells the guest the wall clock time at
3256 * which it started (i.e. its epoch, when its kvmclock was zero).
3258 * In fact those clocks are subtly different; wall clock frequency is
3259 * adjusted by NTP and has leap seconds, while the kvmclock is a
3260 * simple function of the TSC without any such adjustment.
3262 * Perhaps the ABI should have exposed CLOCK_TAI and a ratio between
3263 * that and kvmclock, but even that would be subject to change over
3266 * Attempt to calculate the epoch at a given moment using the *same*
3267 * TSC reading via kvm_get_walltime_and_clockread() to obtain both
3268 * wallclock and kvmclock times, and subtracting one from the other.
3270 * Fall back to using their values at slightly different moments by
3271 * calling ktime_get_real_ns() and get_kvmclock_ns() separately.
3273 uint64_t kvm_get_wall_clock_epoch(struct kvm
*kvm
)
3275 #ifdef CONFIG_X86_64
3276 struct pvclock_vcpu_time_info hv_clock
;
3277 struct kvm_arch
*ka
= &kvm
->arch
;
3278 unsigned long seq
, local_tsc_khz
;
3279 struct timespec64 ts
;
3283 seq
= read_seqcount_begin(&ka
->pvclock_sc
);
3286 if (!ka
->use_master_clock
)
3290 * The TSC read and the call to get_cpu_tsc_khz() must happen
3295 local_tsc_khz
= get_cpu_tsc_khz();
3297 if (local_tsc_khz
&&
3298 !kvm_get_walltime_and_clockread(&ts
, &host_tsc
))
3299 local_tsc_khz
= 0; /* Fall back to old method */
3304 * These values must be snapshotted within the seqcount loop.
3305 * After that, it's just mathematics which can happen on any
3308 hv_clock
.tsc_timestamp
= ka
->master_cycle_now
;
3309 hv_clock
.system_time
= ka
->master_kernel_ns
+ ka
->kvmclock_offset
;
3311 } while (read_seqcount_retry(&ka
->pvclock_sc
, seq
));
3314 * If the conditions were right, and obtaining the wallclock+TSC was
3315 * successful, calculate the KVM clock at the corresponding time and
3316 * subtract one from the other to get the guest's epoch in nanoseconds
3319 if (local_tsc_khz
) {
3320 kvm_get_time_scale(NSEC_PER_SEC
, local_tsc_khz
* NSEC_PER_USEC
,
3321 &hv_clock
.tsc_shift
,
3322 &hv_clock
.tsc_to_system_mul
);
3323 return ts
.tv_nsec
+ NSEC_PER_SEC
* ts
.tv_sec
-
3324 __pvclock_read_cycles(&hv_clock
, host_tsc
);
3327 return ktime_get_real_ns() - get_kvmclock_ns(kvm
);
3331 * kvmclock updates which are isolated to a given vcpu, such as
3332 * vcpu->cpu migration, should not allow system_timestamp from
3333 * the rest of the vcpus to remain static. Otherwise ntp frequency
3334 * correction applies to one vcpu's system_timestamp but not
3337 * So in those cases, request a kvmclock update for all vcpus.
3338 * We need to rate-limit these requests though, as they can
3339 * considerably slow guests that have a large number of vcpus.
3340 * The time for a remote vcpu to update its kvmclock is bound
3341 * by the delay we use to rate-limit the updates.
3344 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
3346 static void kvmclock_update_fn(struct work_struct
*work
)
3349 struct delayed_work
*dwork
= to_delayed_work(work
);
3350 struct kvm_arch
*ka
= container_of(dwork
, struct kvm_arch
,
3351 kvmclock_update_work
);
3352 struct kvm
*kvm
= container_of(ka
, struct kvm
, arch
);
3353 struct kvm_vcpu
*vcpu
;
3355 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
3356 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, vcpu
);
3357 kvm_vcpu_kick(vcpu
);
3361 static void kvm_gen_kvmclock_update(struct kvm_vcpu
*v
)
3363 struct kvm
*kvm
= v
->kvm
;
3365 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, v
);
3366 schedule_delayed_work(&kvm
->arch
.kvmclock_update_work
,
3367 KVMCLOCK_UPDATE_DELAY
);
3370 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
3372 static void kvmclock_sync_fn(struct work_struct
*work
)
3374 struct delayed_work
*dwork
= to_delayed_work(work
);
3375 struct kvm_arch
*ka
= container_of(dwork
, struct kvm_arch
,
3376 kvmclock_sync_work
);
3377 struct kvm
*kvm
= container_of(ka
, struct kvm
, arch
);
3379 schedule_delayed_work(&kvm
->arch
.kvmclock_update_work
, 0);
3380 schedule_delayed_work(&kvm
->arch
.kvmclock_sync_work
,
3381 KVMCLOCK_SYNC_PERIOD
);
/* These helpers are safe iff @msr is known to be an MCx bank MSR. */
static bool is_mci_control_msr(u32 msr)
{
	return (msr & 3) == 0;
}
static bool is_mci_status_msr(u32 msr)
{
	return (msr & 3) == 1;
}

/*
 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
 */
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
	/* McStatusWrEn enabled? */
	if (guest_cpuid_is_amd_or_hygon(vcpu))
		return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));

	return false;
}
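/*
 * The "(msr & 3)" trick works because each MCE bank occupies four consecutive
 * MSRs starting at MSR_IA32_MC0_CTL (0x400), in the order CTL, STATUS, ADDR,
 * MISC; since the base is a multiple of four, the low two bits of the MSR
 * index identify the register within its bank.
 */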
3406 static int set_msr_mce(struct kvm_vcpu
*vcpu
, struct msr_data
*msr_info
)
3408 u64 mcg_cap
= vcpu
->arch
.mcg_cap
;
3409 unsigned bank_num
= mcg_cap
& 0xff;
3410 u32 msr
= msr_info
->index
;
3411 u64 data
= msr_info
->data
;
3412 u32 offset
, last_msr
;
3415 case MSR_IA32_MCG_STATUS
:
3416 vcpu
->arch
.mcg_status
= data
;
3418 case MSR_IA32_MCG_CTL
:
3419 if (!(mcg_cap
& MCG_CTL_P
) &&
3420 (data
|| !msr_info
->host_initiated
))
3422 if (data
!= 0 && data
!= ~(u64
)0)
3424 vcpu
->arch
.mcg_ctl
= data
;
3426 case MSR_IA32_MC0_CTL2
... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS
) - 1:
3427 last_msr
= MSR_IA32_MCx_CTL2(bank_num
) - 1;
3431 if (!(mcg_cap
& MCG_CMCI_P
) && (data
|| !msr_info
->host_initiated
))
3433 /* An attempt to write a 1 to a reserved bit raises #GP */
3434 if (data
& ~(MCI_CTL2_CMCI_EN
| MCI_CTL2_CMCI_THRESHOLD_MASK
))
3436 offset
= array_index_nospec(msr
- MSR_IA32_MC0_CTL2
,
3437 last_msr
+ 1 - MSR_IA32_MC0_CTL2
);
3438 vcpu
->arch
.mci_ctl2_banks
[offset
] = data
;
3440 case MSR_IA32_MC0_CTL
... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS
) - 1:
3441 last_msr
= MSR_IA32_MCx_CTL(bank_num
) - 1;
3446 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other
3447 * values are architecturally undefined. But, some Linux
3448 * kernels clear bit 10 in bank 4 to workaround a BIOS/GART TLB
3449 * issue on AMD K8s, allow bit 10 to be clear when setting all
3450 * other bits in order to avoid an uncaught #GP in the guest.
3452 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable,
3453 * single-bit ECC data errors.
3455 if (is_mci_control_msr(msr
) &&
3456 data
!= 0 && (data
| (1 << 10) | 1) != ~(u64
)0)
3460 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR.
3461 * AMD-based CPUs allow non-zero values, but if and only if
3462 * HWCR[McStatusWrEn] is set.
3464 if (!msr_info
->host_initiated
&& is_mci_status_msr(msr
) &&
3465 data
!= 0 && !can_set_mci_status(vcpu
))
3468 offset
= array_index_nospec(msr
- MSR_IA32_MC0_CTL
,
3469 last_msr
+ 1 - MSR_IA32_MC0_CTL
);
3470 vcpu
->arch
.mce_banks
[offset
] = data
;
3478 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu
*vcpu
)
3480 u64 mask
= KVM_ASYNC_PF_ENABLED
| KVM_ASYNC_PF_DELIVERY_AS_INT
;
3482 return (vcpu
->arch
.apf
.msr_en_val
& mask
) == mask
;
3485 static int kvm_pv_enable_async_pf(struct kvm_vcpu
*vcpu
, u64 data
)
3487 gpa_t gpa
= data
& ~0x3f;
3489 /* Bits 4:5 are reserved, Should be zero */
3493 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF_VMEXIT
) &&
3494 (data
& KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT
))
3497 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF_INT
) &&
3498 (data
& KVM_ASYNC_PF_DELIVERY_AS_INT
))
3501 if (!lapic_in_kernel(vcpu
))
3502 return data
? 1 : 0;
3504 vcpu
->arch
.apf
.msr_en_val
= data
;
3506 if (!kvm_pv_async_pf_enabled(vcpu
)) {
3507 kvm_clear_async_pf_completion_queue(vcpu
);
3508 kvm_async_pf_hash_reset(vcpu
);
3512 if (kvm_gfn_to_hva_cache_init(vcpu
->kvm
, &vcpu
->arch
.apf
.data
, gpa
,
3516 vcpu
->arch
.apf
.send_user_only
= !(data
& KVM_ASYNC_PF_SEND_ALWAYS
);
3517 vcpu
->arch
.apf
.delivery_as_pf_vmexit
= data
& KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT
;
3519 kvm_async_pf_wakeup_all(vcpu
);
3524 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu
*vcpu
, u64 data
)
3526 /* Bits 8-63 are reserved */
3530 if (!lapic_in_kernel(vcpu
))
3533 vcpu
->arch
.apf
.msr_int_val
= data
;
3535 vcpu
->arch
.apf
.vec
= data
& KVM_ASYNC_PF_VEC_MASK
;
3540 static void kvmclock_reset(struct kvm_vcpu
*vcpu
)
3542 kvm_gpc_deactivate(&vcpu
->arch
.pv_time
);
3543 vcpu
->arch
.time
= 0;
3546 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu
*vcpu
)
3548 ++vcpu
->stat
.tlb_flush
;
3549 static_call(kvm_x86_flush_tlb_all
)(vcpu
);
3551 /* Flushing all ASIDs flushes the current ASID... */
3552 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT
, vcpu
);
3555 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu
*vcpu
)
3557 ++vcpu
->stat
.tlb_flush
;
3561 * A TLB flush on behalf of the guest is equivalent to
3562 * INVPCID(all), toggling CR4.PGE, etc., which requires
3563 * a forced sync of the shadow page tables. Ensure all the
3564 * roots are synced and the guest TLB in hardware is clean.
3566 kvm_mmu_sync_roots(vcpu
);
3567 kvm_mmu_sync_prev_roots(vcpu
);
3570 static_call(kvm_x86_flush_tlb_guest
)(vcpu
);
3573 * Flushing all "guest" TLB is always a superset of Hyper-V's fine
3576 kvm_hv_vcpu_purge_flush_tlb(vcpu
);
3580 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu
*vcpu
)
3582 ++vcpu
->stat
.tlb_flush
;
3583 static_call(kvm_x86_flush_tlb_current
)(vcpu
);
3587 * Service "local" TLB flush requests, which are specific to the current MMU
3588 * context. In addition to the generic event handling in vcpu_enter_guest(),
3589 * TLB flushes that are targeted at an MMU context also need to be serviced
3590 * prior before nested VM-Enter/VM-Exit.
3592 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu
*vcpu
)
3594 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT
, vcpu
))
3595 kvm_vcpu_flush_tlb_current(vcpu
);
3597 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST
, vcpu
))
3598 kvm_vcpu_flush_tlb_guest(vcpu
);
3600 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests
);
3602 static void record_steal_time(struct kvm_vcpu
*vcpu
)
3604 struct gfn_to_hva_cache
*ghc
= &vcpu
->arch
.st
.cache
;
3605 struct kvm_steal_time __user
*st
;
3606 struct kvm_memslots
*slots
;
3607 gpa_t gpa
= vcpu
->arch
.st
.msr_val
& KVM_STEAL_VALID_BITS
;
3611 if (kvm_xen_msr_enabled(vcpu
->kvm
)) {
3612 kvm_xen_runstate_set_running(vcpu
);
3616 if (!(vcpu
->arch
.st
.msr_val
& KVM_MSR_ENABLED
))
3619 if (WARN_ON_ONCE(current
->mm
!= vcpu
->kvm
->mm
))
3622 slots
= kvm_memslots(vcpu
->kvm
);
3624 if (unlikely(slots
->generation
!= ghc
->generation
||
3626 kvm_is_error_hva(ghc
->hva
) || !ghc
->memslot
)) {
3627 /* We rely on the fact that it fits in a single page. */
3628 BUILD_BUG_ON((sizeof(*st
) - 1) & KVM_STEAL_VALID_BITS
);
3630 if (kvm_gfn_to_hva_cache_init(vcpu
->kvm
, ghc
, gpa
, sizeof(*st
)) ||
3631 kvm_is_error_hva(ghc
->hva
) || !ghc
->memslot
)
3635 st
= (struct kvm_steal_time __user
*)ghc
->hva
;
3637 * Doing a TLB flush here, on the guest's behalf, can avoid
3640 if (guest_pv_has(vcpu
, KVM_FEATURE_PV_TLB_FLUSH
)) {
3641 u8 st_preempted
= 0;
3644 if (!user_access_begin(st
, sizeof(*st
)))
3647 asm volatile("1: xchgb %0, %2\n"
3650 _ASM_EXTABLE_UA(1b
, 2b
)
3651 : "+q" (st_preempted
),
3653 "+m" (st
->preempted
));
3659 vcpu
->arch
.st
.preempted
= 0;
3661 trace_kvm_pv_tlb_flush(vcpu
->vcpu_id
,
3662 st_preempted
& KVM_VCPU_FLUSH_TLB
);
3663 if (st_preempted
& KVM_VCPU_FLUSH_TLB
)
3664 kvm_vcpu_flush_tlb_guest(vcpu
);
3666 if (!user_access_begin(st
, sizeof(*st
)))
3669 if (!user_access_begin(st
, sizeof(*st
)))
3672 unsafe_put_user(0, &st
->preempted
, out
);
3673 vcpu
->arch
.st
.preempted
= 0;
3676 unsafe_get_user(version
, &st
->version
, out
);
3678 version
+= 1; /* first time write, random junk */
3681 unsafe_put_user(version
, &st
->version
, out
);
3685 unsafe_get_user(steal
, &st
->steal
, out
);
3686 steal
+= current
->sched_info
.run_delay
-
3687 vcpu
->arch
.st
.last_steal
;
3688 vcpu
->arch
.st
.last_steal
= current
->sched_info
.run_delay
;
3689 unsafe_put_user(steal
, &st
->steal
, out
);
3692 unsafe_put_user(version
, &st
->version
, out
);
3697 mark_page_dirty_in_slot(vcpu
->kvm
, ghc
->memslot
, gpa_to_gfn(ghc
->gpa
));
static bool kvm_is_msr_to_save(u32 msr_index)
{
	unsigned int i;

	for (i = 0; i < num_msrs_to_save; i++) {
		if (msrs_to_save[i] == msr_index)
			return true;
	}

	return false;
}
3712 int kvm_set_msr_common(struct kvm_vcpu
*vcpu
, struct msr_data
*msr_info
)
3714 u32 msr
= msr_info
->index
;
3715 u64 data
= msr_info
->data
;
3717 if (msr
&& msr
== vcpu
->kvm
->arch
.xen_hvm_config
.msr
)
3718 return kvm_xen_write_hypercall_page(vcpu
, data
);
3721 case MSR_AMD64_NB_CFG
:
3722 case MSR_IA32_UCODE_WRITE
:
3723 case MSR_VM_HSAVE_PA
:
3724 case MSR_AMD64_PATCH_LOADER
:
3725 case MSR_AMD64_BU_CFG2
:
3726 case MSR_AMD64_DC_CFG
:
3727 case MSR_AMD64_TW_CFG
:
3728 case MSR_F15H_EX_CFG
:
3731 case MSR_IA32_UCODE_REV
:
3732 if (msr_info
->host_initiated
)
3733 vcpu
->arch
.microcode_version
= data
;
3735 case MSR_IA32_ARCH_CAPABILITIES
:
3736 if (!msr_info
->host_initiated
)
3738 vcpu
->arch
.arch_capabilities
= data
;
3740 case MSR_IA32_PERF_CAPABILITIES
:
3741 if (!msr_info
->host_initiated
)
3743 if (data
& ~kvm_caps
.supported_perf_cap
)
3747 * Note, this is not just a performance optimization! KVM
3748 * disallows changing feature MSRs after the vCPU has run; PMU
3749 * refresh will bug the VM if called after the vCPU has run.
3751 if (vcpu
->arch
.perf_capabilities
== data
)
3754 vcpu
->arch
.perf_capabilities
= data
;
3755 kvm_pmu_refresh(vcpu
);
3757 case MSR_IA32_PRED_CMD
: {
3758 u64 reserved_bits
= ~(PRED_CMD_IBPB
| PRED_CMD_SBPB
);
3760 if (!msr_info
->host_initiated
) {
3761 if ((!guest_has_pred_cmd_msr(vcpu
)))
3764 if (!guest_cpuid_has(vcpu
, X86_FEATURE_SPEC_CTRL
) &&
3765 !guest_cpuid_has(vcpu
, X86_FEATURE_AMD_IBPB
))
3766 reserved_bits
|= PRED_CMD_IBPB
;
3768 if (!guest_cpuid_has(vcpu
, X86_FEATURE_SBPB
))
3769 reserved_bits
|= PRED_CMD_SBPB
;
3772 if (!boot_cpu_has(X86_FEATURE_IBPB
))
3773 reserved_bits
|= PRED_CMD_IBPB
;
3775 if (!boot_cpu_has(X86_FEATURE_SBPB
))
3776 reserved_bits
|= PRED_CMD_SBPB
;
3778 if (data
& reserved_bits
)
3784 wrmsrl(MSR_IA32_PRED_CMD
, data
);
3787 case MSR_IA32_FLUSH_CMD
:
3788 if (!msr_info
->host_initiated
&&
3789 !guest_cpuid_has(vcpu
, X86_FEATURE_FLUSH_L1D
))
3792 if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D
) || (data
& ~L1D_FLUSH
))
3797 wrmsrl(MSR_IA32_FLUSH_CMD
, L1D_FLUSH
);
3800 return set_efer(vcpu
, msr_info
);
3802 data
&= ~(u64
)0x40; /* ignore flush filter disable */
3803 data
&= ~(u64
)0x100; /* ignore ignne emulation enable */
3804 data
&= ~(u64
)0x8; /* ignore TLB cache disable */
		 * Allow McStatusWrEn and TscFreqSel. (Linux guests from v3.2
		 * through at least v6.6 whine if TscFreqSel is clear,
		 * depending on F/M/S.)
3811 if (data
& ~(BIT_ULL(18) | BIT_ULL(24))) {
3812 kvm_pr_unimpl_wrmsr(vcpu
, msr
, data
);
3815 vcpu
->arch
.msr_hwcr
= data
;
3817 case MSR_FAM10H_MMIO_CONF_BASE
:
3819 kvm_pr_unimpl_wrmsr(vcpu
, msr
, data
);
3823 case MSR_IA32_CR_PAT
:
3824 if (!kvm_pat_valid(data
))
3827 vcpu
->arch
.pat
= data
;
3829 case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000
:
3830 case MSR_MTRRdefType
:
3831 return kvm_mtrr_set_msr(vcpu
, msr
, data
);
3832 case MSR_IA32_APICBASE
:
3833 return kvm_set_apic_base(vcpu
, msr_info
);
3834 case APIC_BASE_MSR
... APIC_BASE_MSR
+ 0xff:
3835 return kvm_x2apic_msr_write(vcpu
, msr
, data
);
3836 case MSR_IA32_TSC_DEADLINE
:
3837 kvm_set_lapic_tscdeadline_msr(vcpu
, data
);
3839 case MSR_IA32_TSC_ADJUST
:
3840 if (guest_cpuid_has(vcpu
, X86_FEATURE_TSC_ADJUST
)) {
3841 if (!msr_info
->host_initiated
) {
3842 s64 adj
= data
- vcpu
->arch
.ia32_tsc_adjust_msr
;
3843 adjust_tsc_offset_guest(vcpu
, adj
);
3844 /* Before back to guest, tsc_timestamp must be adjusted
3845 * as well, otherwise guest's percpu pvclock time could jump.
3847 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, vcpu
);
3849 vcpu
->arch
.ia32_tsc_adjust_msr
= data
;
3852 case MSR_IA32_MISC_ENABLE
: {
3853 u64 old_val
= vcpu
->arch
.ia32_misc_enable_msr
;
3855 if (!msr_info
->host_initiated
) {
3857 if ((old_val
^ data
) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK
)
3860 /* R bits, i.e. writes are ignored, but don't fault. */
3861 data
= data
& ~MSR_IA32_MISC_ENABLE_EMON
;
3862 data
|= old_val
& MSR_IA32_MISC_ENABLE_EMON
;
3865 if (!kvm_check_has_quirk(vcpu
->kvm
, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT
) &&
3866 ((old_val
^ data
) & MSR_IA32_MISC_ENABLE_MWAIT
)) {
3867 if (!guest_cpuid_has(vcpu
, X86_FEATURE_XMM3
))
3869 vcpu
->arch
.ia32_misc_enable_msr
= data
;
3870 kvm_update_cpuid_runtime(vcpu
);
3872 vcpu
->arch
.ia32_misc_enable_msr
= data
;
3876 case MSR_IA32_SMBASE
:
3877 if (!IS_ENABLED(CONFIG_KVM_SMM
) || !msr_info
->host_initiated
)
3879 vcpu
->arch
.smbase
= data
;
3881 case MSR_IA32_POWER_CTL
:
3882 vcpu
->arch
.msr_ia32_power_ctl
= data
;
3885 if (msr_info
->host_initiated
) {
3886 kvm_synchronize_tsc(vcpu
, &data
);
3888 u64 adj
= kvm_compute_l1_tsc_offset(vcpu
, data
) - vcpu
->arch
.l1_tsc_offset
;
3889 adjust_tsc_offset_guest(vcpu
, adj
);
3890 vcpu
->arch
.ia32_tsc_adjust_msr
+= adj
;
3894 if (!msr_info
->host_initiated
&&
3895 !guest_cpuid_has(vcpu
, X86_FEATURE_XSAVES
))
3898 * KVM supports exposing PT to the guest, but does not support
3899 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
3900 * XSAVES/XRSTORS to save/restore PT MSRs.
3902 if (data
& ~kvm_caps
.supported_xss
)
3904 vcpu
->arch
.ia32_xss
= data
;
3905 kvm_update_cpuid_runtime(vcpu
);
3908 if (!msr_info
->host_initiated
)
3910 vcpu
->arch
.smi_count
= data
;
3912 case MSR_KVM_WALL_CLOCK_NEW
:
3913 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE2
))
3916 vcpu
->kvm
->arch
.wall_clock
= data
;
3917 kvm_write_wall_clock(vcpu
->kvm
, data
, 0);
3919 case MSR_KVM_WALL_CLOCK
:
3920 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE
))
3923 vcpu
->kvm
->arch
.wall_clock
= data
;
3924 kvm_write_wall_clock(vcpu
->kvm
, data
, 0);
3926 case MSR_KVM_SYSTEM_TIME_NEW
:
3927 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE2
))
3930 kvm_write_system_time(vcpu
, data
, false, msr_info
->host_initiated
);
3932 case MSR_KVM_SYSTEM_TIME
:
3933 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE
))
3936 kvm_write_system_time(vcpu
, data
, true, msr_info
->host_initiated
);
3938 case MSR_KVM_ASYNC_PF_EN
:
3939 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF
))
3942 if (kvm_pv_enable_async_pf(vcpu
, data
))
3945 case MSR_KVM_ASYNC_PF_INT
:
3946 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF_INT
))
3949 if (kvm_pv_enable_async_pf_int(vcpu
, data
))
3952 case MSR_KVM_ASYNC_PF_ACK
:
3953 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF_INT
))
3956 vcpu
->arch
.apf
.pageready_pending
= false;
3957 kvm_check_async_pf_completion(vcpu
);
3960 case MSR_KVM_STEAL_TIME
:
3961 if (!guest_pv_has(vcpu
, KVM_FEATURE_STEAL_TIME
))
3964 if (unlikely(!sched_info_on()))
3967 if (data
& KVM_STEAL_RESERVED_MASK
)
3970 vcpu
->arch
.st
.msr_val
= data
;
3972 if (!(data
& KVM_MSR_ENABLED
))
3975 kvm_make_request(KVM_REQ_STEAL_UPDATE
, vcpu
);
3978 case MSR_KVM_PV_EOI_EN
:
3979 if (!guest_pv_has(vcpu
, KVM_FEATURE_PV_EOI
))
3982 if (kvm_lapic_set_pv_eoi(vcpu
, data
, sizeof(u8
)))
3986 case MSR_KVM_POLL_CONTROL
:
3987 if (!guest_pv_has(vcpu
, KVM_FEATURE_POLL_CONTROL
))
3990 /* only enable bit supported */
3991 if (data
& (-1ULL << 1))
3994 vcpu
->arch
.msr_kvm_poll_control
= data
;
3997 case MSR_IA32_MCG_CTL
:
3998 case MSR_IA32_MCG_STATUS
:
3999 case MSR_IA32_MC0_CTL
... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS
) - 1:
4000 case MSR_IA32_MC0_CTL2
... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS
) - 1:
4001 return set_msr_mce(vcpu
, msr_info
);
4003 case MSR_K7_PERFCTR0
... MSR_K7_PERFCTR3
:
4004 case MSR_P6_PERFCTR0
... MSR_P6_PERFCTR1
:
4005 case MSR_K7_EVNTSEL0
... MSR_K7_EVNTSEL3
:
4006 case MSR_P6_EVNTSEL0
... MSR_P6_EVNTSEL1
:
4007 if (kvm_pmu_is_valid_msr(vcpu
, msr
))
4008 return kvm_pmu_set_msr(vcpu
, msr_info
);
4011 kvm_pr_unimpl_wrmsr(vcpu
, msr
, data
);
4013 case MSR_K7_CLK_CTL
:
4015 * Ignore all writes to this no longer documented MSR.
4016 * Writes are only relevant for old K7 processors,
4017 * all pre-dating SVM, but a recommended workaround from
4018 * AMD for these chips. It is possible to specify the
4019 * affected processor models on the command line, hence
4020 * the need to ignore the workaround.
4023 case HV_X64_MSR_GUEST_OS_ID
... HV_X64_MSR_SINT15
:
4024 case HV_X64_MSR_SYNDBG_CONTROL
... HV_X64_MSR_SYNDBG_PENDING_BUFFER
:
4025 case HV_X64_MSR_SYNDBG_OPTIONS
:
4026 case HV_X64_MSR_CRASH_P0
... HV_X64_MSR_CRASH_P4
:
4027 case HV_X64_MSR_CRASH_CTL
:
4028 case HV_X64_MSR_STIMER0_CONFIG
... HV_X64_MSR_STIMER3_COUNT
:
4029 case HV_X64_MSR_REENLIGHTENMENT_CONTROL
:
4030 case HV_X64_MSR_TSC_EMULATION_CONTROL
:
4031 case HV_X64_MSR_TSC_EMULATION_STATUS
:
4032 case HV_X64_MSR_TSC_INVARIANT_CONTROL
:
4033 return kvm_hv_set_msr_common(vcpu
, msr
, data
,
4034 msr_info
->host_initiated
);
4035 case MSR_IA32_BBL_CR_CTL3
:
4036 /* Drop writes to this legacy MSR -- see rdmsr
4037 * counterpart for further detail.
4039 kvm_pr_unimpl_wrmsr(vcpu
, msr
, data
);
4041 case MSR_AMD64_OSVW_ID_LENGTH
:
4042 if (!guest_cpuid_has(vcpu
, X86_FEATURE_OSVW
))
4044 vcpu
->arch
.osvw
.length
= data
;
4046 case MSR_AMD64_OSVW_STATUS
:
4047 if (!guest_cpuid_has(vcpu
, X86_FEATURE_OSVW
))
4049 vcpu
->arch
.osvw
.status
= data
;
4051 case MSR_PLATFORM_INFO
:
4052 if (!msr_info
->host_initiated
||
4053 (!(data
& MSR_PLATFORM_INFO_CPUID_FAULT
) &&
4054 cpuid_fault_enabled(vcpu
)))
4056 vcpu
->arch
.msr_platform_info
= data
;
4058 case MSR_MISC_FEATURES_ENABLES
:
4059 if (data
& ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT
||
4060 (data
& MSR_MISC_FEATURES_ENABLES_CPUID_FAULT
&&
4061 !supports_cpuid_fault(vcpu
)))
4063 vcpu
->arch
.msr_misc_features_enables
= data
;
4065 #ifdef CONFIG_X86_64
4067 if (!msr_info
->host_initiated
&&
4068 !guest_cpuid_has(vcpu
, X86_FEATURE_XFD
))
4071 if (data
& ~kvm_guest_supported_xfd(vcpu
))
4074 fpu_update_guest_xfd(&vcpu
->arch
.guest_fpu
, data
);
4076 case MSR_IA32_XFD_ERR
:
4077 if (!msr_info
->host_initiated
&&
4078 !guest_cpuid_has(vcpu
, X86_FEATURE_XFD
))
4081 if (data
& ~kvm_guest_supported_xfd(vcpu
))
4084 vcpu
->arch
.guest_fpu
.xfd_err
= data
;
4088 if (kvm_pmu_is_valid_msr(vcpu
, msr
))
4089 return kvm_pmu_set_msr(vcpu
, msr_info
);
4092 * Userspace is allowed to write '0' to MSRs that KVM reports
		 * as to-be-saved, even if an MSR isn't fully supported.
4095 if (msr_info
->host_initiated
&& !data
&&
4096 kvm_is_msr_to_save(msr
))
4099 return KVM_MSR_RET_INVALID
;
4103 EXPORT_SYMBOL_GPL(kvm_set_msr_common
);
4105 static int get_msr_mce(struct kvm_vcpu
*vcpu
, u32 msr
, u64
*pdata
, bool host
)
4108 u64 mcg_cap
= vcpu
->arch
.mcg_cap
;
4109 unsigned bank_num
= mcg_cap
& 0xff;
4110 u32 offset
, last_msr
;
4113 case MSR_IA32_P5_MC_ADDR
:
4114 case MSR_IA32_P5_MC_TYPE
:
4117 case MSR_IA32_MCG_CAP
:
4118 data
= vcpu
->arch
.mcg_cap
;
4120 case MSR_IA32_MCG_CTL
:
4121 if (!(mcg_cap
& MCG_CTL_P
) && !host
)
4123 data
= vcpu
->arch
.mcg_ctl
;
4125 case MSR_IA32_MCG_STATUS
:
4126 data
= vcpu
->arch
.mcg_status
;
4128 case MSR_IA32_MC0_CTL2
... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS
) - 1:
4129 last_msr
= MSR_IA32_MCx_CTL2(bank_num
) - 1;
4133 if (!(mcg_cap
& MCG_CMCI_P
) && !host
)
4135 offset
= array_index_nospec(msr
- MSR_IA32_MC0_CTL2
,
4136 last_msr
+ 1 - MSR_IA32_MC0_CTL2
);
4137 data
= vcpu
->arch
.mci_ctl2_banks
[offset
];
4139 case MSR_IA32_MC0_CTL
... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS
) - 1:
4140 last_msr
= MSR_IA32_MCx_CTL(bank_num
) - 1;
4144 offset
= array_index_nospec(msr
- MSR_IA32_MC0_CTL
,
4145 last_msr
+ 1 - MSR_IA32_MC0_CTL
);
4146 data
= vcpu
->arch
.mce_banks
[offset
];
4155 int kvm_get_msr_common(struct kvm_vcpu
*vcpu
, struct msr_data
*msr_info
)
4157 switch (msr_info
->index
) {
4158 case MSR_IA32_PLATFORM_ID
:
4159 case MSR_IA32_EBL_CR_POWERON
:
4160 case MSR_IA32_LASTBRANCHFROMIP
:
4161 case MSR_IA32_LASTBRANCHTOIP
:
4162 case MSR_IA32_LASTINTFROMIP
:
4163 case MSR_IA32_LASTINTTOIP
:
4164 case MSR_AMD64_SYSCFG
:
4165 case MSR_K8_TSEG_ADDR
:
4166 case MSR_K8_TSEG_MASK
:
4167 case MSR_VM_HSAVE_PA
:
4168 case MSR_K8_INT_PENDING_MSG
:
4169 case MSR_AMD64_NB_CFG
:
4170 case MSR_FAM10H_MMIO_CONF_BASE
:
4171 case MSR_AMD64_BU_CFG2
:
4172 case MSR_IA32_PERF_CTL
:
4173 case MSR_AMD64_DC_CFG
:
4174 case MSR_AMD64_TW_CFG
:
4175 case MSR_F15H_EX_CFG
:
4177 * Intel Sandy Bridge CPUs must support the RAPL (running average power
4178 * limit) MSRs. Just return 0, as we do not want to expose the host
4179 * data here. Do not conditionalize this on CPUID, as KVM does not do
4180 * so for existing CPU-specific MSRs.
4182 case MSR_RAPL_POWER_UNIT
:
4183 case MSR_PP0_ENERGY_STATUS
: /* Power plane 0 (core) */
4184 case MSR_PP1_ENERGY_STATUS
: /* Power plane 1 (graphics uncore) */
4185 case MSR_PKG_ENERGY_STATUS
: /* Total package */
4186 case MSR_DRAM_ENERGY_STATUS
: /* DRAM controller */
4189 case MSR_K7_EVNTSEL0
... MSR_K7_EVNTSEL3
:
4190 case MSR_K7_PERFCTR0
... MSR_K7_PERFCTR3
:
4191 case MSR_P6_PERFCTR0
... MSR_P6_PERFCTR1
:
4192 case MSR_P6_EVNTSEL0
... MSR_P6_EVNTSEL1
:
4193 if (kvm_pmu_is_valid_msr(vcpu
, msr_info
->index
))
4194 return kvm_pmu_get_msr(vcpu
, msr_info
);
4197 case MSR_IA32_UCODE_REV
:
4198 msr_info
->data
= vcpu
->arch
.microcode_version
;
4200 case MSR_IA32_ARCH_CAPABILITIES
:
4201 if (!msr_info
->host_initiated
&&
4202 !guest_cpuid_has(vcpu
, X86_FEATURE_ARCH_CAPABILITIES
))
4204 msr_info
->data
= vcpu
->arch
.arch_capabilities
;
4206 case MSR_IA32_PERF_CAPABILITIES
:
4207 if (!msr_info
->host_initiated
&&
4208 !guest_cpuid_has(vcpu
, X86_FEATURE_PDCM
))
4210 msr_info
->data
= vcpu
->arch
.perf_capabilities
;
4212 case MSR_IA32_POWER_CTL
:
4213 msr_info
->data
= vcpu
->arch
.msr_ia32_power_ctl
;
4215 case MSR_IA32_TSC
: {
4217 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
4218 * even when not intercepted. AMD manual doesn't explicitly
4219 * state this but appears to behave the same.
4221 * On userspace reads and writes, however, we unconditionally
4222 * return L1's TSC value to ensure backwards-compatible
4223 * behavior for migration.
4227 if (msr_info
->host_initiated
) {
4228 offset
= vcpu
->arch
.l1_tsc_offset
;
4229 ratio
= vcpu
->arch
.l1_tsc_scaling_ratio
;
4231 offset
= vcpu
->arch
.tsc_offset
;
4232 ratio
= vcpu
->arch
.tsc_scaling_ratio
;
4235 msr_info
->data
= kvm_scale_tsc(rdtsc(), ratio
) + offset
;
4238 case MSR_IA32_CR_PAT
:
4239 msr_info
->data
= vcpu
->arch
.pat
;
4242 case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000
:
4243 case MSR_MTRRdefType
:
4244 return kvm_mtrr_get_msr(vcpu
, msr_info
->index
, &msr_info
->data
);
4245 case 0xcd: /* fsb frequency */
4249 * MSR_EBC_FREQUENCY_ID
4250 * Conservative value valid for even the basic CPU models.
4251 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
4252 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
4253 * and 266MHz for model 3, or 4. Set Core Clock
4254 * Frequency to System Bus Frequency Ratio to 1 (bits
4255 * 31:24) even though these are only valid for CPU
4256 * models > 2, however guests may end up dividing or
4257 * multiplying by zero otherwise.
4259 case MSR_EBC_FREQUENCY_ID
:
4260 msr_info
->data
= 1 << 24;
4262 case MSR_IA32_APICBASE
:
4263 msr_info
->data
= kvm_get_apic_base(vcpu
);
4265 case APIC_BASE_MSR
... APIC_BASE_MSR
+ 0xff:
4266 return kvm_x2apic_msr_read(vcpu
, msr_info
->index
, &msr_info
->data
);
4267 case MSR_IA32_TSC_DEADLINE
:
4268 msr_info
->data
= kvm_get_lapic_tscdeadline_msr(vcpu
);
4270 case MSR_IA32_TSC_ADJUST
:
4271 msr_info
->data
= (u64
)vcpu
->arch
.ia32_tsc_adjust_msr
;
4273 case MSR_IA32_MISC_ENABLE
:
4274 msr_info
->data
= vcpu
->arch
.ia32_misc_enable_msr
;
4276 case MSR_IA32_SMBASE
:
4277 if (!IS_ENABLED(CONFIG_KVM_SMM
) || !msr_info
->host_initiated
)
4279 msr_info
->data
= vcpu
->arch
.smbase
;
4282 msr_info
->data
= vcpu
->arch
.smi_count
;
4284 case MSR_IA32_PERF_STATUS
:
4285 /* TSC increment by tick */
4286 msr_info
->data
= 1000ULL;
4287 /* CPU multiplier */
4288 msr_info
->data
|= (((uint64_t)4ULL) << 40);
4291 msr_info
->data
= vcpu
->arch
.efer
;
4293 case MSR_KVM_WALL_CLOCK
:
4294 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE
))
4297 msr_info
->data
= vcpu
->kvm
->arch
.wall_clock
;
4299 case MSR_KVM_WALL_CLOCK_NEW
:
4300 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE2
))
4303 msr_info
->data
= vcpu
->kvm
->arch
.wall_clock
;
4305 case MSR_KVM_SYSTEM_TIME
:
4306 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE
))
4309 msr_info
->data
= vcpu
->arch
.time
;
4311 case MSR_KVM_SYSTEM_TIME_NEW
:
4312 if (!guest_pv_has(vcpu
, KVM_FEATURE_CLOCKSOURCE2
))
4315 msr_info
->data
= vcpu
->arch
.time
;
4317 case MSR_KVM_ASYNC_PF_EN
:
4318 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF
))
4321 msr_info
->data
= vcpu
->arch
.apf
.msr_en_val
;
4323 case MSR_KVM_ASYNC_PF_INT
:
4324 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF_INT
))
4327 msr_info
->data
= vcpu
->arch
.apf
.msr_int_val
;
4329 case MSR_KVM_ASYNC_PF_ACK
:
4330 if (!guest_pv_has(vcpu
, KVM_FEATURE_ASYNC_PF_INT
))
4335 case MSR_KVM_STEAL_TIME
:
4336 if (!guest_pv_has(vcpu
, KVM_FEATURE_STEAL_TIME
))
4339 msr_info
->data
= vcpu
->arch
.st
.msr_val
;
4341 case MSR_KVM_PV_EOI_EN
:
4342 if (!guest_pv_has(vcpu
, KVM_FEATURE_PV_EOI
))
4345 msr_info
->data
= vcpu
->arch
.pv_eoi
.msr_val
;
4347 case MSR_KVM_POLL_CONTROL
:
4348 if (!guest_pv_has(vcpu
, KVM_FEATURE_POLL_CONTROL
))
4351 msr_info
->data
= vcpu
->arch
.msr_kvm_poll_control
;
4353 case MSR_IA32_P5_MC_ADDR
:
4354 case MSR_IA32_P5_MC_TYPE
:
4355 case MSR_IA32_MCG_CAP
:
4356 case MSR_IA32_MCG_CTL
:
4357 case MSR_IA32_MCG_STATUS
:
4358 case MSR_IA32_MC0_CTL
... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS
) - 1:
4359 case MSR_IA32_MC0_CTL2
... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS
) - 1:
4360 return get_msr_mce(vcpu
, msr_info
->index
, &msr_info
->data
,
4361 msr_info
->host_initiated
);
4363 if (!msr_info
->host_initiated
&&
4364 !guest_cpuid_has(vcpu
, X86_FEATURE_XSAVES
))
4366 msr_info
->data
= vcpu
->arch
.ia32_xss
;
4368 case MSR_K7_CLK_CTL
:
4370 * Provide expected ramp-up count for K7. All other
4371 * are set to zero, indicating minimum divisors for
4374 * This prevents guest kernels on AMD host with CPU
4375 * type 6, model 8 and higher from exploding due to
4376 * the rdmsr failing.
4378 msr_info
->data
= 0x20000000;
4380 case HV_X64_MSR_GUEST_OS_ID
... HV_X64_MSR_SINT15
:
4381 case HV_X64_MSR_SYNDBG_CONTROL
... HV_X64_MSR_SYNDBG_PENDING_BUFFER
:
4382 case HV_X64_MSR_SYNDBG_OPTIONS
:
4383 case HV_X64_MSR_CRASH_P0
... HV_X64_MSR_CRASH_P4
:
4384 case HV_X64_MSR_CRASH_CTL
:
4385 case HV_X64_MSR_STIMER0_CONFIG
... HV_X64_MSR_STIMER3_COUNT
:
4386 case HV_X64_MSR_REENLIGHTENMENT_CONTROL
:
4387 case HV_X64_MSR_TSC_EMULATION_CONTROL
:
4388 case HV_X64_MSR_TSC_EMULATION_STATUS
:
4389 case HV_X64_MSR_TSC_INVARIANT_CONTROL
:
4390 return kvm_hv_get_msr_common(vcpu
,
4391 msr_info
->index
, &msr_info
->data
,
4392 msr_info
->host_initiated
);
4393 case MSR_IA32_BBL_CR_CTL3
:
4394 /* This legacy MSR exists but isn't fully documented in current
4395 * silicon. It is however accessed by winxp in very narrow
4396 * scenarios where it sets bit #19, itself documented as
4397 * a "reserved" bit. Best effort attempt to source coherent
4398 * read data here should the balance of the register be
4399 * interpreted by the guest:
4401 * L2 cache control register 3: 64GB range, 256KB size,
4402 * enabled, latency 0x1, configured
4404 msr_info
->data
= 0xbe702111;
4406 case MSR_AMD64_OSVW_ID_LENGTH
:
4407 if (!guest_cpuid_has(vcpu
, X86_FEATURE_OSVW
))
4409 msr_info
->data
= vcpu
->arch
.osvw
.length
;
4411 case MSR_AMD64_OSVW_STATUS
:
4412 if (!guest_cpuid_has(vcpu
, X86_FEATURE_OSVW
))
4414 msr_info
->data
= vcpu
->arch
.osvw
.status
;
4416 case MSR_PLATFORM_INFO
:
4417 if (!msr_info
->host_initiated
&&
4418 !vcpu
->kvm
->arch
.guest_can_read_msr_platform_info
)
4420 msr_info
->data
= vcpu
->arch
.msr_platform_info
;
4422 case MSR_MISC_FEATURES_ENABLES
:
4423 msr_info
->data
= vcpu
->arch
.msr_misc_features_enables
;
4426 msr_info
->data
= vcpu
->arch
.msr_hwcr
;
4428 #ifdef CONFIG_X86_64
4430 if (!msr_info
->host_initiated
&&
4431 !guest_cpuid_has(vcpu
, X86_FEATURE_XFD
))
4434 msr_info
->data
= vcpu
->arch
.guest_fpu
.fpstate
->xfd
;
4436 case MSR_IA32_XFD_ERR
:
4437 if (!msr_info
->host_initiated
&&
4438 !guest_cpuid_has(vcpu
, X86_FEATURE_XFD
))
4441 msr_info
->data
= vcpu
->arch
.guest_fpu
.xfd_err
;
	default:
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
			return kvm_pmu_get_msr(vcpu, msr_info);

		/*
		 * Userspace is allowed to read MSRs that KVM reports as
		 * to-be-saved, even if an MSR isn't fully supported.
		 */
		if (msr_info->host_initiated &&
		    kvm_is_msr_to_save(msr_info->index)) {
			msr_info->data = 0;
			break;
		}

		return KVM_MSR_RET_INVALID;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	return i;
}
/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	unsigned size;
	int r;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = memdup_user(user_msrs->entries, size);
	if (IS_ERR(entries)) {
		r = PTR_ERR(entries);
		goto out;
	}

	r = __msr_io(vcpu, &msrs, entries, do_msr);

	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		r = -EFAULT;

	kfree(entries);
out:
	return r;
}
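
/*
 * Userspace-side sketch of how msr_io() is reached (illustrative only, not
 * part of this file): the KVM_GET_MSRS and KVM_SET_MSRS vCPU ioctls pass a
 * struct kvm_msrs header immediately followed by nmsrs kvm_msr_entry
 * records, e.g.:
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } buf = {
 *		.hdr.nmsrs  = 1,
 *		.e[0].index = MSR_IA32_TSC,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &buf);  // returns the number of MSRs read
 *
 * vcpu_fd is assumed to be a vCPU file descriptor from KVM_CREATE_VCPU.
 */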
static inline bool kvm_can_mwait_in_guest(void)
{
	return boot_cpu_has(X86_FEATURE_MWAIT) &&
		!boot_cpu_has_bug(X86_BUG_MONITOR) &&
		boot_cpu_has(X86_FEATURE_ARAT);
}
static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
					    struct kvm_cpuid2 __user *cpuid_arg)
{
	struct kvm_cpuid2 cpuid;
	int r;

	r = -EFAULT;
	if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
		return r;

	r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries);
	if (r)
		return r;

	r = -EFAULT;
	if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
		return r;

	return 0;
}
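
/*
 * KVM_CHECK_EXTENSION handler for x86.  A return value of 0 means the
 * capability is unsupported; most capabilities return 1, while some return
 * a bitmask or count describing exactly what is supported (e.g.
 * KVM_CAP_NR_VCPUS, KVM_CAP_EXIT_HYPERCALL, KVM_CAP_XEN_HVM).
 */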
4551 int kvm_vm_ioctl_check_extension(struct kvm
*kvm
, long ext
)
4556 case KVM_CAP_IRQCHIP
:
4558 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL
:
4559 case KVM_CAP_SET_TSS_ADDR
:
4560 case KVM_CAP_EXT_CPUID
:
4561 case KVM_CAP_EXT_EMUL_CPUID
:
4562 case KVM_CAP_CLOCKSOURCE
:
4564 case KVM_CAP_NOP_IO_DELAY
:
4565 case KVM_CAP_MP_STATE
:
4566 case KVM_CAP_SYNC_MMU
:
4567 case KVM_CAP_USER_NMI
:
4568 case KVM_CAP_REINJECT_CONTROL
:
4569 case KVM_CAP_IRQ_INJECT_STATUS
:
4570 case KVM_CAP_IOEVENTFD
:
4571 case KVM_CAP_IOEVENTFD_NO_LENGTH
:
4573 case KVM_CAP_PIT_STATE2
:
4574 case KVM_CAP_SET_IDENTITY_MAP_ADDR
:
4575 case KVM_CAP_VCPU_EVENTS
:
4576 case KVM_CAP_HYPERV
:
4577 case KVM_CAP_HYPERV_VAPIC
:
4578 case KVM_CAP_HYPERV_SPIN
:
4579 case KVM_CAP_HYPERV_SYNIC
:
4580 case KVM_CAP_HYPERV_SYNIC2
:
4581 case KVM_CAP_HYPERV_VP_INDEX
:
4582 case KVM_CAP_HYPERV_EVENTFD
:
4583 case KVM_CAP_HYPERV_TLBFLUSH
:
4584 case KVM_CAP_HYPERV_SEND_IPI
:
4585 case KVM_CAP_HYPERV_CPUID
:
4586 case KVM_CAP_HYPERV_ENFORCE_CPUID
:
4587 case KVM_CAP_SYS_HYPERV_CPUID
:
4588 case KVM_CAP_PCI_SEGMENT
:
4589 case KVM_CAP_DEBUGREGS
:
4590 case KVM_CAP_X86_ROBUST_SINGLESTEP
:
4592 case KVM_CAP_ASYNC_PF
:
4593 case KVM_CAP_ASYNC_PF_INT
:
4594 case KVM_CAP_GET_TSC_KHZ
:
4595 case KVM_CAP_KVMCLOCK_CTRL
:
4596 case KVM_CAP_READONLY_MEM
:
4597 case KVM_CAP_HYPERV_TIME
:
4598 case KVM_CAP_IOAPIC_POLARITY_IGNORED
:
4599 case KVM_CAP_TSC_DEADLINE_TIMER
:
4600 case KVM_CAP_DISABLE_QUIRKS
:
4601 case KVM_CAP_SET_BOOT_CPU_ID
:
4602 case KVM_CAP_SPLIT_IRQCHIP
:
4603 case KVM_CAP_IMMEDIATE_EXIT
:
4604 case KVM_CAP_PMU_EVENT_FILTER
:
4605 case KVM_CAP_PMU_EVENT_MASKED_EVENTS
:
4606 case KVM_CAP_GET_MSR_FEATURES
:
4607 case KVM_CAP_MSR_PLATFORM_INFO
:
4608 case KVM_CAP_EXCEPTION_PAYLOAD
:
4609 case KVM_CAP_X86_TRIPLE_FAULT_EVENT
:
4610 case KVM_CAP_SET_GUEST_DEBUG
:
4611 case KVM_CAP_LAST_CPU
:
4612 case KVM_CAP_X86_USER_SPACE_MSR
:
4613 case KVM_CAP_X86_MSR_FILTER
:
4614 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID
:
4615 #ifdef CONFIG_X86_SGX_KVM
4616 case KVM_CAP_SGX_ATTRIBUTE
:
4618 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM
:
4619 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM
:
4620 case KVM_CAP_SREGS2
:
4621 case KVM_CAP_EXIT_ON_EMULATION_FAILURE
:
4622 case KVM_CAP_VCPU_ATTRIBUTES
:
4623 case KVM_CAP_SYS_ATTRIBUTES
:
4625 case KVM_CAP_ENABLE_CAP
:
4626 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
:
4627 case KVM_CAP_IRQFD_RESAMPLE
:
4630 case KVM_CAP_EXIT_HYPERCALL
:
4631 r
= KVM_EXIT_HYPERCALL_VALID_MASK
;
4633 case KVM_CAP_SET_GUEST_DEBUG2
:
4634 return KVM_GUESTDBG_VALID_MASK
;
4635 #ifdef CONFIG_KVM_XEN
4636 case KVM_CAP_XEN_HVM
:
4637 r
= KVM_XEN_HVM_CONFIG_HYPERCALL_MSR
|
4638 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL
|
4639 KVM_XEN_HVM_CONFIG_SHARED_INFO
|
4640 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL
|
4641 KVM_XEN_HVM_CONFIG_EVTCHN_SEND
;
4642 if (sched_info_on())
4643 r
|= KVM_XEN_HVM_CONFIG_RUNSTATE
|
4644 KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG
;
4647 case KVM_CAP_SYNC_REGS
:
4648 r
= KVM_SYNC_X86_VALID_FIELDS
;
4650 case KVM_CAP_ADJUST_CLOCK
:
4651 r
= KVM_CLOCK_VALID_FLAGS
;
4653 case KVM_CAP_X86_DISABLE_EXITS
:
4654 r
= KVM_X86_DISABLE_EXITS_PAUSE
;
4656 if (!mitigate_smt_rsb
) {
4657 r
|= KVM_X86_DISABLE_EXITS_HLT
|
4658 KVM_X86_DISABLE_EXITS_CSTATE
;
4660 if (kvm_can_mwait_in_guest())
4661 r
|= KVM_X86_DISABLE_EXITS_MWAIT
;
4664 case KVM_CAP_X86_SMM
:
4665 if (!IS_ENABLED(CONFIG_KVM_SMM
))
4668 /* SMBASE is usually relocated above 1M on modern chipsets,
4669 * and SMM handlers might indeed rely on 4G segment limits,
4670 * so do not report SMM to be available if real mode is
4671 * emulated via vm86 mode. Still, do not go to great lengths
4672 * to avoid userspace's usage of the feature, because it is a
4673 * fringe case that is not enabled except via specific settings
4674 * of the module parameters.
4676 r
= static_call(kvm_x86_has_emulated_msr
)(kvm
, MSR_IA32_SMBASE
);
4678 case KVM_CAP_NR_VCPUS
:
4679 r
= min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS
);
4681 case KVM_CAP_MAX_VCPUS
:
4684 case KVM_CAP_MAX_VCPU_ID
:
4685 r
= KVM_MAX_VCPU_IDS
;
4687 case KVM_CAP_PV_MMU
: /* obsolete */
4691 r
= KVM_MAX_MCE_BANKS
;
4694 r
= boot_cpu_has(X86_FEATURE_XSAVE
);
4696 case KVM_CAP_TSC_CONTROL
:
4697 case KVM_CAP_VM_TSC_CONTROL
:
4698 r
= kvm_caps
.has_tsc_control
;
4700 case KVM_CAP_X2APIC_API
:
4701 r
= KVM_X2APIC_API_VALID_FLAGS
;
4703 case KVM_CAP_NESTED_STATE
:
4704 r
= kvm_x86_ops
.nested_ops
->get_state
?
4705 kvm_x86_ops
.nested_ops
->get_state(NULL
, NULL
, 0) : 0;
4707 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH
:
4708 r
= kvm_x86_ops
.enable_l2_tlb_flush
!= NULL
;
4710 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS
:
4711 r
= kvm_x86_ops
.nested_ops
->enable_evmcs
!= NULL
;
4713 case KVM_CAP_SMALLER_MAXPHYADDR
:
4714 r
= (int) allow_smaller_maxphyaddr
;
4716 case KVM_CAP_STEAL_TIME
:
4717 r
= sched_info_on();
4719 case KVM_CAP_X86_BUS_LOCK_EXIT
:
4720 if (kvm_caps
.has_bus_lock_exit
)
4721 r
= KVM_BUS_LOCK_DETECTION_OFF
|
4722 KVM_BUS_LOCK_DETECTION_EXIT
;
4726 case KVM_CAP_XSAVE2
: {
4727 r
= xstate_required_size(kvm_get_filtered_xcr0(), false);
4728 if (r
< sizeof(struct kvm_xsave
))
4729 r
= sizeof(struct kvm_xsave
);
4732 case KVM_CAP_PMU_CAPABILITY
:
4733 r
= enable_pmu
? KVM_CAP_PMU_VALID_MASK
: 0;
4735 case KVM_CAP_DISABLE_QUIRKS2
:
4736 r
= KVM_X86_VALID_QUIRKS
;
	case KVM_CAP_X86_NOTIFY_VMEXIT:
		r = kvm_caps.has_notify_vmexit;
		break;
	default:
		break;
	}
	return r;
}
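
/*
 * Convert the u64 address in a struct kvm_device_attr into a __user pointer,
 * failing with -EFAULT (as an ERR_PTR) if the value does not fit in an
 * unsigned long on this kernel.
 */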
static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
{
	void __user *uaddr = (void __user *)(unsigned long)attr->addr;

	if ((u64)(unsigned long)uaddr != attr->addr)
		return ERR_PTR_USR(-EFAULT);
	return uaddr;
}
static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
{
	u64 __user *uaddr = kvm_get_attr_addr(attr);

	if (IS_ERR(uaddr))
		return PTR_ERR(uaddr);

	switch (attr->attr) {
	case KVM_X86_XCOMP_GUEST_SUPP:
		if (put_user(kvm_caps.supported_xcr0, uaddr))
			return -EFAULT;
		return 0;
	default:
		return -ENXIO;
	}
}
static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
{
	if (attr->group)
		return -ENXIO;

	switch (attr->attr) {
	case KVM_X86_XCOMP_GUEST_SUPP:
		return 0;
	default:
		return -ENXIO;
	}
}
4789 long kvm_arch_dev_ioctl(struct file
*filp
,
4790 unsigned int ioctl
, unsigned long arg
)
4792 void __user
*argp
= (void __user
*)arg
;
4796 case KVM_GET_MSR_INDEX_LIST
: {
4797 struct kvm_msr_list __user
*user_msr_list
= argp
;
4798 struct kvm_msr_list msr_list
;
4802 if (copy_from_user(&msr_list
, user_msr_list
, sizeof(msr_list
)))
4805 msr_list
.nmsrs
= num_msrs_to_save
+ num_emulated_msrs
;
4806 if (copy_to_user(user_msr_list
, &msr_list
, sizeof(msr_list
)))
4809 if (n
< msr_list
.nmsrs
)
4812 if (copy_to_user(user_msr_list
->indices
, &msrs_to_save
,
4813 num_msrs_to_save
* sizeof(u32
)))
4815 if (copy_to_user(user_msr_list
->indices
+ num_msrs_to_save
,
4817 num_emulated_msrs
* sizeof(u32
)))
4822 case KVM_GET_SUPPORTED_CPUID
:
4823 case KVM_GET_EMULATED_CPUID
: {
4824 struct kvm_cpuid2 __user
*cpuid_arg
= argp
;
4825 struct kvm_cpuid2 cpuid
;
4828 if (copy_from_user(&cpuid
, cpuid_arg
, sizeof(cpuid
)))
4831 r
= kvm_dev_ioctl_get_cpuid(&cpuid
, cpuid_arg
->entries
,
4837 if (copy_to_user(cpuid_arg
, &cpuid
, sizeof(cpuid
)))
4842 case KVM_X86_GET_MCE_CAP_SUPPORTED
:
4844 if (copy_to_user(argp
, &kvm_caps
.supported_mce_cap
,
4845 sizeof(kvm_caps
.supported_mce_cap
)))
4849 case KVM_GET_MSR_FEATURE_INDEX_LIST
: {
4850 struct kvm_msr_list __user
*user_msr_list
= argp
;
4851 struct kvm_msr_list msr_list
;
4855 if (copy_from_user(&msr_list
, user_msr_list
, sizeof(msr_list
)))
4858 msr_list
.nmsrs
= num_msr_based_features
;
4859 if (copy_to_user(user_msr_list
, &msr_list
, sizeof(msr_list
)))
4862 if (n
< msr_list
.nmsrs
)
4865 if (copy_to_user(user_msr_list
->indices
, &msr_based_features
,
4866 num_msr_based_features
* sizeof(u32
)))
4872 r
= msr_io(NULL
, argp
, do_get_msr_feature
, 1);
4874 case KVM_GET_SUPPORTED_HV_CPUID
:
4875 r
= kvm_ioctl_get_supported_hv_cpuid(NULL
, argp
);
4877 case KVM_GET_DEVICE_ATTR
: {
4878 struct kvm_device_attr attr
;
4880 if (copy_from_user(&attr
, (void __user
*)arg
, sizeof(attr
)))
4882 r
= kvm_x86_dev_get_attr(&attr
);
4885 case KVM_HAS_DEVICE_ATTR
: {
4886 struct kvm_device_attr attr
;
4888 if (copy_from_user(&attr
, (void __user
*)arg
, sizeof(attr
)))
4890 r
= kvm_x86_dev_has_attr(&attr
);
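
/*
 * WBINVD handling: when a guest has assigned devices with non-coherent DMA,
 * its WBINVD instructions must actually flush the physical CPUs' caches.
 * wbinvd_ipi() is the IPI callback used to flush a remote CPU's caches when
 * the hardware cannot simply exit on WBINVD.
 */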
static void wbinvd_ipi(void *garbage)
{
	wbinvd();
}

static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
}
4911 void kvm_arch_vcpu_load(struct kvm_vcpu
*vcpu
, int cpu
)
4913 /* Address WBINVD may be executed by guest */
4914 if (need_emulate_wbinvd(vcpu
)) {
4915 if (static_call(kvm_x86_has_wbinvd_exit
)())
4916 cpumask_set_cpu(cpu
, vcpu
->arch
.wbinvd_dirty_mask
);
4917 else if (vcpu
->cpu
!= -1 && vcpu
->cpu
!= cpu
)
4918 smp_call_function_single(vcpu
->cpu
,
4919 wbinvd_ipi
, NULL
, 1);
4922 static_call(kvm_x86_vcpu_load
)(vcpu
, cpu
);
4924 /* Save host pkru register if supported */
4925 vcpu
->arch
.host_pkru
= read_pkru();
4927 /* Apply any externally detected TSC adjustments (due to suspend) */
4928 if (unlikely(vcpu
->arch
.tsc_offset_adjustment
)) {
4929 adjust_tsc_offset_host(vcpu
, vcpu
->arch
.tsc_offset_adjustment
);
4930 vcpu
->arch
.tsc_offset_adjustment
= 0;
4931 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, vcpu
);
4934 if (unlikely(vcpu
->cpu
!= cpu
) || kvm_check_tsc_unstable()) {
4935 s64 tsc_delta
= !vcpu
->arch
.last_host_tsc
? 0 :
4936 rdtsc() - vcpu
->arch
.last_host_tsc
;
4938 mark_tsc_unstable("KVM discovered backwards TSC");
4940 if (kvm_check_tsc_unstable()) {
4941 u64 offset
= kvm_compute_l1_tsc_offset(vcpu
,
4942 vcpu
->arch
.last_guest_tsc
);
4943 kvm_vcpu_write_tsc_offset(vcpu
, offset
);
4944 vcpu
->arch
.tsc_catchup
= 1;
4947 if (kvm_lapic_hv_timer_in_use(vcpu
))
4948 kvm_lapic_restart_hv_timer(vcpu
);
4951 * On a host with synchronized TSC, there is no need to update
4952 * kvmclock on vcpu->cpu migration
4954 if (!vcpu
->kvm
->arch
.use_master_clock
|| vcpu
->cpu
== -1)
4955 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE
, vcpu
);
4956 if (vcpu
->cpu
!= cpu
)
4957 kvm_make_request(KVM_REQ_MIGRATE_TIMER
, vcpu
);
4961 kvm_make_request(KVM_REQ_STEAL_UPDATE
, vcpu
);
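
/*
 * Record KVM_VCPU_PREEMPTED in the guest's steal-time structure so the guest
 * scheduler can see that this vCPU was preempted by the host.  Uses a
 * non-faulting copy because this runs in sched-out context.
 */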
4964 static void kvm_steal_time_set_preempted(struct kvm_vcpu
*vcpu
)
4966 struct gfn_to_hva_cache
*ghc
= &vcpu
->arch
.st
.cache
;
4967 struct kvm_steal_time __user
*st
;
4968 struct kvm_memslots
*slots
;
4969 static const u8 preempted
= KVM_VCPU_PREEMPTED
;
4970 gpa_t gpa
= vcpu
->arch
.st
.msr_val
& KVM_STEAL_VALID_BITS
;
4973 * The vCPU can be marked preempted if and only if the VM-Exit was on
4974 * an instruction boundary and will not trigger guest emulation of any
4975 * kind (see vcpu_run). Vendor specific code controls (conservatively)
4976 * when this is true, for example allowing the vCPU to be marked
4977 * preempted if and only if the VM-Exit was due to a host interrupt.
4979 if (!vcpu
->arch
.at_instruction_boundary
) {
4980 vcpu
->stat
.preemption_other
++;
4984 vcpu
->stat
.preemption_reported
++;
4985 if (!(vcpu
->arch
.st
.msr_val
& KVM_MSR_ENABLED
))
4988 if (vcpu
->arch
.st
.preempted
)
4991 /* This happens on process exit */
4992 if (unlikely(current
->mm
!= vcpu
->kvm
->mm
))
4995 slots
= kvm_memslots(vcpu
->kvm
);
4997 if (unlikely(slots
->generation
!= ghc
->generation
||
4999 kvm_is_error_hva(ghc
->hva
) || !ghc
->memslot
))
5002 st
= (struct kvm_steal_time __user
*)ghc
->hva
;
5003 BUILD_BUG_ON(sizeof(st
->preempted
) != sizeof(preempted
));
5005 if (!copy_to_user_nofault(&st
->preempted
, &preempted
, sizeof(preempted
)))
5006 vcpu
->arch
.st
.preempted
= KVM_VCPU_PREEMPTED
;
5008 mark_page_dirty_in_slot(vcpu
->kvm
, ghc
->memslot
, gpa_to_gfn(ghc
->gpa
));
5011 void kvm_arch_vcpu_put(struct kvm_vcpu
*vcpu
)
5015 if (vcpu
->preempted
) {
5016 if (!vcpu
->arch
.guest_state_protected
)
5017 vcpu
->arch
.preempted_in_kernel
= !static_call(kvm_x86_get_cpl
)(vcpu
);
5020 * Take the srcu lock as memslots will be accessed to check the gfn
5021 * cache generation against the memslots generation.
5023 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
5024 if (kvm_xen_msr_enabled(vcpu
->kvm
))
5025 kvm_xen_runstate_set_preempted(vcpu
);
5027 kvm_steal_time_set_preempted(vcpu
);
5028 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
5031 static_call(kvm_x86_vcpu_put
)(vcpu
);
5032 vcpu
->arch
.last_host_tsc
= rdtsc();
5035 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu
*vcpu
,
5036 struct kvm_lapic_state
*s
)
5038 static_call_cond(kvm_x86_sync_pir_to_irr
)(vcpu
);
5040 return kvm_apic_get_state(vcpu
, s
);
5043 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu
*vcpu
,
5044 struct kvm_lapic_state
*s
)
5048 r
= kvm_apic_set_state(vcpu
, s
);
5051 update_cr8_intercept(vcpu
);
5056 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu
*vcpu
)
5059 * We can accept userspace's request for interrupt injection
5060 * as long as we have a place to store the interrupt number.
5061 * The actual injection will happen when the CPU is able to
5062 * deliver the interrupt.
5064 if (kvm_cpu_has_extint(vcpu
))
5067 /* Acknowledging ExtINT does not happen if LINT0 is masked. */
5068 return (!lapic_in_kernel(vcpu
) ||
5069 kvm_apic_accept_pic_intr(vcpu
));
5072 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu
*vcpu
)
5075 * Do not cause an interrupt window exit if an exception
5076 * is pending or an event needs reinjection; userspace
5077 * might want to inject the interrupt manually using KVM_SET_REGS
5078 * or KVM_SET_SREGS. For that to work, we must be at an
5079 * instruction boundary and with no events half-injected.
5081 return (kvm_arch_interrupt_allowed(vcpu
) &&
5082 kvm_cpu_accept_dm_intr(vcpu
) &&
5083 !kvm_event_needs_reinjection(vcpu
) &&
5084 !kvm_is_exception_pending(vcpu
));
5087 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu
*vcpu
,
5088 struct kvm_interrupt
*irq
)
5090 if (irq
->irq
>= KVM_NR_INTERRUPTS
)
5093 if (!irqchip_in_kernel(vcpu
->kvm
)) {
5094 kvm_queue_interrupt(vcpu
, irq
->irq
, false);
5095 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
5100 * With in-kernel LAPIC, we only use this to inject EXTINT, so
5101 * fail for in-kernel 8259.
5103 if (pic_in_kernel(vcpu
->kvm
))
5106 if (vcpu
->arch
.pending_external_vector
!= -1)
5109 vcpu
->arch
.pending_external_vector
= irq
->irq
;
5110 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}
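
/*
 * KVM_X86_SETUP_MCE: configure the number of MCE banks and MCG capabilities
 * for this vCPU.  The low byte of mcg_cap is the bank count and must be
 * between 1 and KVM_MAX_MCE_BANKS; the remaining bits must be a subset of
 * kvm_caps.supported_mce_cap plus the count/threshold fields.
 */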
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;

	r = -EINVAL;
	if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
		goto out;
	if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000))
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */
	for (bank = 0; bank < bank_num; bank++) {
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
		if (mcg_cap & MCG_CMCI_P)
			vcpu->arch.mci_ctl2_banks[bank] = 0;
	}

	kvm_apic_after_set_mcg_cap(vcpu);

	static_call(kvm_x86_setup_mce)(vcpu);
out:
	return r;
}
/*
 * Validate this is an UCNA (uncorrectable no action) error by checking the
 * MCG_STATUS and MCi_STATUS registers:
 * - none of the bits for Machine Check Exceptions are set
 * - both the VAL (valid) and UC (uncorrectable) bits are set
 * MCI_STATUS_PCC - Processor Context Corrupted
 * MCI_STATUS_S - Signaled as a Machine Check Exception
 * MCI_STATUS_AR - Software recoverable Action Required
 */
static bool is_ucna(struct kvm_x86_mce *mce)
{
	return	!mce->mcg_status &&
		!(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) &&
		(mce->status & MCI_STATUS_VAL) &&
		(mce->status & MCI_STATUS_UC);
}

static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64 *banks)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;

	banks[1] = mce->status;
	banks[2] = mce->addr;
	banks[3] = mce->misc;
	vcpu->arch.mcg_status = mce->mcg_status;

	if (!(mcg_cap & MCG_CMCI_P) ||
	    !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN))
		return 0;

	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI);

	return 0;
}
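
/*
 * KVM_X86_SET_MCE: inject a machine-check event into the vCPU.  UCNA errors
 * are delivered via CMCI (see kvm_vcpu_x86_set_ucna() above); everything
 * else is either raised as #MC, latched as an overflow, or escalated to a
 * triple fault when CR4.MCE is clear or MCIP is already set.
 */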
5196 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu
*vcpu
,
5197 struct kvm_x86_mce
*mce
)
5199 u64 mcg_cap
= vcpu
->arch
.mcg_cap
;
5200 unsigned bank_num
= mcg_cap
& 0xff;
5201 u64
*banks
= vcpu
->arch
.mce_banks
;
5203 if (mce
->bank
>= bank_num
|| !(mce
->status
& MCI_STATUS_VAL
))
5206 banks
+= array_index_nospec(4 * mce
->bank
, 4 * bank_num
);
5209 return kvm_vcpu_x86_set_ucna(vcpu
, mce
, banks
);
5212 * if IA32_MCG_CTL is not all 1s, the uncorrected error
5213 * reporting is disabled
5215 if ((mce
->status
& MCI_STATUS_UC
) && (mcg_cap
& MCG_CTL_P
) &&
5216 vcpu
->arch
.mcg_ctl
!= ~(u64
)0)
5219 * if IA32_MCi_CTL is not all 1s, the uncorrected error
5220 * reporting is disabled for the bank
5222 if ((mce
->status
& MCI_STATUS_UC
) && banks
[0] != ~(u64
)0)
5224 if (mce
->status
& MCI_STATUS_UC
) {
5225 if ((vcpu
->arch
.mcg_status
& MCG_STATUS_MCIP
) ||
5226 !kvm_is_cr4_bit_set(vcpu
, X86_CR4_MCE
)) {
5227 kvm_make_request(KVM_REQ_TRIPLE_FAULT
, vcpu
);
5230 if (banks
[1] & MCI_STATUS_VAL
)
5231 mce
->status
|= MCI_STATUS_OVER
;
5232 banks
[2] = mce
->addr
;
5233 banks
[3] = mce
->misc
;
5234 vcpu
->arch
.mcg_status
= mce
->mcg_status
;
5235 banks
[1] = mce
->status
;
5236 kvm_queue_exception(vcpu
, MC_VECTOR
);
5237 } else if (!(banks
[1] & MCI_STATUS_VAL
)
5238 || !(banks
[1] & MCI_STATUS_UC
)) {
5239 if (banks
[1] & MCI_STATUS_VAL
)
5240 mce
->status
|= MCI_STATUS_OVER
;
5241 banks
[2] = mce
->addr
;
5242 banks
[3] = mce
->misc
;
5243 banks
[1] = mce
->status
;
5245 banks
[1] |= MCI_STATUS_OVER
;
5249 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu
*vcpu
,
5250 struct kvm_vcpu_events
*events
)
5252 struct kvm_queued_exception
*ex
;
5256 #ifdef CONFIG_KVM_SMM
5257 if (kvm_check_request(KVM_REQ_SMI
, vcpu
))
5262 * KVM's ABI only allows for one exception to be migrated. Luckily,
5263 * the only time there can be two queued exceptions is if there's a
5264 * non-exiting _injected_ exception, and a pending exiting exception.
5265 * In that case, ignore the VM-Exiting exception as it's an extension
5266 * of the injected exception.
5268 if (vcpu
->arch
.exception_vmexit
.pending
&&
5269 !vcpu
->arch
.exception
.pending
&&
5270 !vcpu
->arch
.exception
.injected
)
5271 ex
= &vcpu
->arch
.exception_vmexit
;
5273 ex
= &vcpu
->arch
.exception
;
	/*
	 * In guest mode, payload delivery should be deferred if the exception
	 * will be intercepted by L1, e.g. KVM should not modify CR2 if L1
	 * intercepts #PF, ditto for DR6 and #DBs.  If the per-VM capability,
	 * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
	 * propagate the payload and so it cannot be safely deferred.  Deliver
	 * the payload if the capability hasn't been requested.
	 */
5283 if (!vcpu
->kvm
->arch
.exception_payload_enabled
&&
5284 ex
->pending
&& ex
->has_payload
)
5285 kvm_deliver_exception_payload(vcpu
, ex
);
5287 memset(events
, 0, sizeof(*events
));
5290 * The API doesn't provide the instruction length for software
5291 * exceptions, so don't report them. As long as the guest RIP
5292 * isn't advanced, we should expect to encounter the exception
5295 if (!kvm_exception_is_soft(ex
->vector
)) {
5296 events
->exception
.injected
= ex
->injected
;
5297 events
->exception
.pending
= ex
->pending
;
5299 * For ABI compatibility, deliberately conflate
5300 * pending and injected exceptions when
5301 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
5303 if (!vcpu
->kvm
->arch
.exception_payload_enabled
)
5304 events
->exception
.injected
|= ex
->pending
;
5306 events
->exception
.nr
= ex
->vector
;
5307 events
->exception
.has_error_code
= ex
->has_error_code
;
5308 events
->exception
.error_code
= ex
->error_code
;
5309 events
->exception_has_payload
= ex
->has_payload
;
5310 events
->exception_payload
= ex
->payload
;
5312 events
->interrupt
.injected
=
5313 vcpu
->arch
.interrupt
.injected
&& !vcpu
->arch
.interrupt
.soft
;
5314 events
->interrupt
.nr
= vcpu
->arch
.interrupt
.nr
;
5315 events
->interrupt
.shadow
= static_call(kvm_x86_get_interrupt_shadow
)(vcpu
);
5317 events
->nmi
.injected
= vcpu
->arch
.nmi_injected
;
5318 events
->nmi
.pending
= kvm_get_nr_pending_nmis(vcpu
);
5319 events
->nmi
.masked
= static_call(kvm_x86_get_nmi_mask
)(vcpu
);
5321 /* events->sipi_vector is never valid when reporting to user space */
5323 #ifdef CONFIG_KVM_SMM
5324 events
->smi
.smm
= is_smm(vcpu
);
5325 events
->smi
.pending
= vcpu
->arch
.smi_pending
;
5326 events
->smi
.smm_inside_nmi
=
5327 !!(vcpu
->arch
.hflags
& HF_SMM_INSIDE_NMI_MASK
);
5329 events
->smi
.latched_init
= kvm_lapic_latched_init(vcpu
);
5331 events
->flags
= (KVM_VCPUEVENT_VALID_NMI_PENDING
5332 | KVM_VCPUEVENT_VALID_SHADOW
5333 | KVM_VCPUEVENT_VALID_SMM
);
5334 if (vcpu
->kvm
->arch
.exception_payload_enabled
)
5335 events
->flags
|= KVM_VCPUEVENT_VALID_PAYLOAD
;
5336 if (vcpu
->kvm
->arch
.triple_fault_event
) {
5337 events
->triple_fault
.pending
= kvm_test_request(KVM_REQ_TRIPLE_FAULT
, vcpu
);
5338 events
->flags
|= KVM_VCPUEVENT_VALID_TRIPLE_FAULT
;
5342 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu
*vcpu
,
5343 struct kvm_vcpu_events
*events
)
5345 if (events
->flags
& ~(KVM_VCPUEVENT_VALID_NMI_PENDING
5346 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
5347 | KVM_VCPUEVENT_VALID_SHADOW
5348 | KVM_VCPUEVENT_VALID_SMM
5349 | KVM_VCPUEVENT_VALID_PAYLOAD
5350 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT
))
5353 if (events
->flags
& KVM_VCPUEVENT_VALID_PAYLOAD
) {
5354 if (!vcpu
->kvm
->arch
.exception_payload_enabled
)
5356 if (events
->exception
.pending
)
5357 events
->exception
.injected
= 0;
5359 events
->exception_has_payload
= 0;
5361 events
->exception
.pending
= 0;
5362 events
->exception_has_payload
= 0;
5365 if ((events
->exception
.injected
|| events
->exception
.pending
) &&
5366 (events
->exception
.nr
> 31 || events
->exception
.nr
== NMI_VECTOR
))
5369 /* INITs are latched while in SMM */
5370 if (events
->flags
& KVM_VCPUEVENT_VALID_SMM
&&
5371 (events
->smi
.smm
|| events
->smi
.pending
) &&
5372 vcpu
->arch
.mp_state
== KVM_MP_STATE_INIT_RECEIVED
)
	/*
	 * Flag that userspace is stuffing an exception, the next KVM_RUN will
	 * morph the exception to a VM-Exit if appropriate.  Do this only for
	 * pending exceptions, already-injected exceptions are not subject to
	 * interception.  Note, userspace that conflates pending and injected
	 * is hosed, and will incorrectly convert an injected exception into a
	 * pending exception, which in turn may cause a spurious VM-Exit.
	 */
5385 vcpu
->arch
.exception_from_userspace
= events
->exception
.pending
;
5387 vcpu
->arch
.exception_vmexit
.pending
= false;
5389 vcpu
->arch
.exception
.injected
= events
->exception
.injected
;
5390 vcpu
->arch
.exception
.pending
= events
->exception
.pending
;
5391 vcpu
->arch
.exception
.vector
= events
->exception
.nr
;
5392 vcpu
->arch
.exception
.has_error_code
= events
->exception
.has_error_code
;
5393 vcpu
->arch
.exception
.error_code
= events
->exception
.error_code
;
5394 vcpu
->arch
.exception
.has_payload
= events
->exception_has_payload
;
5395 vcpu
->arch
.exception
.payload
= events
->exception_payload
;
5397 vcpu
->arch
.interrupt
.injected
= events
->interrupt
.injected
;
5398 vcpu
->arch
.interrupt
.nr
= events
->interrupt
.nr
;
5399 vcpu
->arch
.interrupt
.soft
= events
->interrupt
.soft
;
5400 if (events
->flags
& KVM_VCPUEVENT_VALID_SHADOW
)
5401 static_call(kvm_x86_set_interrupt_shadow
)(vcpu
,
5402 events
->interrupt
.shadow
);
5404 vcpu
->arch
.nmi_injected
= events
->nmi
.injected
;
5405 if (events
->flags
& KVM_VCPUEVENT_VALID_NMI_PENDING
) {
5406 vcpu
->arch
.nmi_pending
= 0;
5407 atomic_set(&vcpu
->arch
.nmi_queued
, events
->nmi
.pending
);
5408 kvm_make_request(KVM_REQ_NMI
, vcpu
);
5410 static_call(kvm_x86_set_nmi_mask
)(vcpu
, events
->nmi
.masked
);
5412 if (events
->flags
& KVM_VCPUEVENT_VALID_SIPI_VECTOR
&&
5413 lapic_in_kernel(vcpu
))
5414 vcpu
->arch
.apic
->sipi_vector
= events
->sipi_vector
;
5416 if (events
->flags
& KVM_VCPUEVENT_VALID_SMM
) {
5417 #ifdef CONFIG_KVM_SMM
5418 if (!!(vcpu
->arch
.hflags
& HF_SMM_MASK
) != events
->smi
.smm
) {
5419 kvm_leave_nested(vcpu
);
5420 kvm_smm_changed(vcpu
, events
->smi
.smm
);
5423 vcpu
->arch
.smi_pending
= events
->smi
.pending
;
5425 if (events
->smi
.smm
) {
5426 if (events
->smi
.smm_inside_nmi
)
5427 vcpu
->arch
.hflags
|= HF_SMM_INSIDE_NMI_MASK
;
5429 vcpu
->arch
.hflags
&= ~HF_SMM_INSIDE_NMI_MASK
;
5433 if (events
->smi
.smm
|| events
->smi
.pending
||
5434 events
->smi
.smm_inside_nmi
)
5438 if (lapic_in_kernel(vcpu
)) {
5439 if (events
->smi
.latched_init
)
5440 set_bit(KVM_APIC_INIT
, &vcpu
->arch
.apic
->pending_events
);
5442 clear_bit(KVM_APIC_INIT
, &vcpu
->arch
.apic
->pending_events
);
5446 if (events
->flags
& KVM_VCPUEVENT_VALID_TRIPLE_FAULT
) {
5447 if (!vcpu
->kvm
->arch
.triple_fault_event
)
5449 if (events
->triple_fault
.pending
)
5450 kvm_make_request(KVM_REQ_TRIPLE_FAULT
, vcpu
);
5452 kvm_clear_request(KVM_REQ_TRIPLE_FAULT
, vcpu
);
5455 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
					     struct kvm_debugregs *dbgregs)
{
	unsigned long val;

	memset(dbgregs, 0, sizeof(*dbgregs));
	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
	kvm_get_dr(vcpu, 6, &val);
	dbgregs->dr6 = val;
	dbgregs->dr7 = vcpu->arch.dr7;
}

static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
					    struct kvm_debugregs *dbgregs)
{
	if (dbgregs->flags)
		return -EINVAL;

	if (!kvm_dr6_valid(dbgregs->dr6))
		return -EINVAL;
	if (!kvm_dr7_valid(dbgregs->dr7))
		return -EINVAL;

	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
	kvm_update_dr0123(vcpu);
	vcpu->arch.dr6 = dbgregs->dr6;
	vcpu->arch.dr7 = dbgregs->dr7;
	kvm_update_dr7(vcpu);

	return 0;
}
5493 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu
*vcpu
,
5494 u8
*state
, unsigned int size
)
	/*
	 * Only copy state for features that are enabled for the guest.  The
	 * state itself isn't problematic, but setting bits in the header for
	 * features that are supported in *this* host but not exposed to the
	 * guest can result in KVM_SET_XSAVE failing when live migrating to a
	 * compatible host without the features that are NOT exposed to the
	 * guest.
	 *
	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
	 * supported by the host.
	 */
5508 u64 supported_xcr0
= vcpu
->arch
.guest_supported_xcr0
|
5509 XFEATURE_MASK_FPSSE
;
5511 if (fpstate_is_confidential(&vcpu
->arch
.guest_fpu
))
5514 fpu_copy_guest_fpstate_to_uabi(&vcpu
->arch
.guest_fpu
, state
, size
,
5515 supported_xcr0
, vcpu
->arch
.pkru
);
5518 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu
*vcpu
,
5519 struct kvm_xsave
*guest_xsave
)
5521 return kvm_vcpu_ioctl_x86_get_xsave2(vcpu
, (void *)guest_xsave
->region
,
5522 sizeof(guest_xsave
->region
));
5525 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu
*vcpu
,
5526 struct kvm_xsave
*guest_xsave
)
5528 if (fpstate_is_confidential(&vcpu
->arch
.guest_fpu
))
5531 return fpu_copy_uabi_to_guest_fpstate(&vcpu
->arch
.guest_fpu
,
5532 guest_xsave
->region
,
5533 kvm_caps
.supported_xcr0
,
static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
					struct kvm_xcrs *guest_xcrs)
{
	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		guest_xcrs->nr_xcrs = 0;
		return;
	}

	guest_xcrs->nr_xcrs = 1;
	guest_xcrs->flags = 0;
	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}

static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{
	int i, r = 0;

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return -EINVAL;

	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
		return -EINVAL;

	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
		/* Only support XCR0 currently */
		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
					  guest_xcrs->xcrs[i].value);
			break;
		}
	if (r)
		r = -EINVAL;
	return r;
}
/*
 * kvm_set_guest_paused() indicates to the guest kernel that it has been
 * stopped by the hypervisor.  This function will be called from the host only.
 * EINVAL is returned when the host attempts to set the flag for a guest that
 * does not support pv clocks.
 */
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pv_time.active)
		return -EINVAL;
	vcpu->arch.pvclock_set_guest_stopped_request = true;
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	return 0;
}
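
/*
 * Device-attribute interface for per-vCPU TSC state (group KVM_VCPU_TSC_CTRL).
 * Currently only KVM_VCPU_TSC_OFFSET is implemented: reading returns the L1
 * TSC offset, writing synchronizes the vCPU's TSC to the requested offset.
 */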
5589 static int kvm_arch_tsc_has_attr(struct kvm_vcpu
*vcpu
,
5590 struct kvm_device_attr
*attr
)
5594 switch (attr
->attr
) {
5595 case KVM_VCPU_TSC_OFFSET
:
5605 static int kvm_arch_tsc_get_attr(struct kvm_vcpu
*vcpu
,
5606 struct kvm_device_attr
*attr
)
5608 u64 __user
*uaddr
= kvm_get_attr_addr(attr
);
5612 return PTR_ERR(uaddr
);
5614 switch (attr
->attr
) {
5615 case KVM_VCPU_TSC_OFFSET
:
5617 if (put_user(vcpu
->arch
.l1_tsc_offset
, uaddr
))
5628 static int kvm_arch_tsc_set_attr(struct kvm_vcpu
*vcpu
,
5629 struct kvm_device_attr
*attr
)
5631 u64 __user
*uaddr
= kvm_get_attr_addr(attr
);
5632 struct kvm
*kvm
= vcpu
->kvm
;
5636 return PTR_ERR(uaddr
);
5638 switch (attr
->attr
) {
5639 case KVM_VCPU_TSC_OFFSET
: {
5640 u64 offset
, tsc
, ns
;
5641 unsigned long flags
;
5645 if (get_user(offset
, uaddr
))
5648 raw_spin_lock_irqsave(&kvm
->arch
.tsc_write_lock
, flags
);
5650 matched
= (vcpu
->arch
.virtual_tsc_khz
&&
5651 kvm
->arch
.last_tsc_khz
== vcpu
->arch
.virtual_tsc_khz
&&
5652 kvm
->arch
.last_tsc_offset
== offset
);
5654 tsc
= kvm_scale_tsc(rdtsc(), vcpu
->arch
.l1_tsc_scaling_ratio
) + offset
;
5655 ns
= get_kvmclock_base_ns();
5657 kvm
->arch
.user_set_tsc
= true;
5658 __kvm_synchronize_tsc(vcpu
, offset
, tsc
, ns
, matched
);
5659 raw_spin_unlock_irqrestore(&kvm
->arch
.tsc_write_lock
, flags
);
5671 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu
*vcpu
,
5675 struct kvm_device_attr attr
;
5678 if (copy_from_user(&attr
, argp
, sizeof(attr
)))
5681 if (attr
.group
!= KVM_VCPU_TSC_CTRL
)
5685 case KVM_HAS_DEVICE_ATTR
:
5686 r
= kvm_arch_tsc_has_attr(vcpu
, &attr
);
5688 case KVM_GET_DEVICE_ATTR
:
5689 r
= kvm_arch_tsc_get_attr(vcpu
, &attr
);
5691 case KVM_SET_DEVICE_ATTR
:
5692 r
= kvm_arch_tsc_set_attr(vcpu
, &attr
);
5699 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu
*vcpu
,
5700 struct kvm_enable_cap
*cap
)
5703 uint16_t vmcs_version
;
5704 void __user
*user_ptr
;
5710 case KVM_CAP_HYPERV_SYNIC2
:
5715 case KVM_CAP_HYPERV_SYNIC
:
5716 if (!irqchip_in_kernel(vcpu
->kvm
))
5718 return kvm_hv_activate_synic(vcpu
, cap
->cap
==
5719 KVM_CAP_HYPERV_SYNIC2
);
5720 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS
:
5721 if (!kvm_x86_ops
.nested_ops
->enable_evmcs
)
5723 r
= kvm_x86_ops
.nested_ops
->enable_evmcs(vcpu
, &vmcs_version
);
5725 user_ptr
= (void __user
*)(uintptr_t)cap
->args
[0];
5726 if (copy_to_user(user_ptr
, &vmcs_version
,
5727 sizeof(vmcs_version
)))
5731 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH
:
5732 if (!kvm_x86_ops
.enable_l2_tlb_flush
)
5735 return static_call(kvm_x86_enable_l2_tlb_flush
)(vcpu
);
5737 case KVM_CAP_HYPERV_ENFORCE_CPUID
:
5738 return kvm_hv_set_enforce_cpuid(vcpu
, cap
->args
[0]);
5740 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID
:
5741 vcpu
->arch
.pv_cpuid
.enforce
= cap
->args
[0];
5742 if (vcpu
->arch
.pv_cpuid
.enforce
)
5743 kvm_update_pv_runtime(vcpu
);
5751 long kvm_arch_vcpu_ioctl(struct file
*filp
,
5752 unsigned int ioctl
, unsigned long arg
)
5754 struct kvm_vcpu
*vcpu
= filp
->private_data
;
5755 void __user
*argp
= (void __user
*)arg
;
5758 struct kvm_sregs2
*sregs2
;
5759 struct kvm_lapic_state
*lapic
;
5760 struct kvm_xsave
*xsave
;
5761 struct kvm_xcrs
*xcrs
;
5769 case KVM_GET_LAPIC
: {
5771 if (!lapic_in_kernel(vcpu
))
5773 u
.lapic
= kzalloc(sizeof(struct kvm_lapic_state
),
5774 GFP_KERNEL_ACCOUNT
);
5779 r
= kvm_vcpu_ioctl_get_lapic(vcpu
, u
.lapic
);
5783 if (copy_to_user(argp
, u
.lapic
, sizeof(struct kvm_lapic_state
)))
5788 case KVM_SET_LAPIC
: {
5790 if (!lapic_in_kernel(vcpu
))
5792 u
.lapic
= memdup_user(argp
, sizeof(*u
.lapic
));
5793 if (IS_ERR(u
.lapic
)) {
5794 r
= PTR_ERR(u
.lapic
);
5798 r
= kvm_vcpu_ioctl_set_lapic(vcpu
, u
.lapic
);
5801 case KVM_INTERRUPT
: {
5802 struct kvm_interrupt irq
;
5805 if (copy_from_user(&irq
, argp
, sizeof(irq
)))
5807 r
= kvm_vcpu_ioctl_interrupt(vcpu
, &irq
);
5811 r
= kvm_vcpu_ioctl_nmi(vcpu
);
5815 r
= kvm_inject_smi(vcpu
);
5818 case KVM_SET_CPUID
: {
5819 struct kvm_cpuid __user
*cpuid_arg
= argp
;
5820 struct kvm_cpuid cpuid
;
5823 if (copy_from_user(&cpuid
, cpuid_arg
, sizeof(cpuid
)))
5825 r
= kvm_vcpu_ioctl_set_cpuid(vcpu
, &cpuid
, cpuid_arg
->entries
);
5828 case KVM_SET_CPUID2
: {
5829 struct kvm_cpuid2 __user
*cpuid_arg
= argp
;
5830 struct kvm_cpuid2 cpuid
;
5833 if (copy_from_user(&cpuid
, cpuid_arg
, sizeof(cpuid
)))
5835 r
= kvm_vcpu_ioctl_set_cpuid2(vcpu
, &cpuid
,
5836 cpuid_arg
->entries
);
5839 case KVM_GET_CPUID2
: {
5840 struct kvm_cpuid2 __user
*cpuid_arg
= argp
;
5841 struct kvm_cpuid2 cpuid
;
5844 if (copy_from_user(&cpuid
, cpuid_arg
, sizeof(cpuid
)))
5846 r
= kvm_vcpu_ioctl_get_cpuid2(vcpu
, &cpuid
,
5847 cpuid_arg
->entries
);
5851 if (copy_to_user(cpuid_arg
, &cpuid
, sizeof(cpuid
)))
5856 case KVM_GET_MSRS
: {
5857 int idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
5858 r
= msr_io(vcpu
, argp
, do_get_msr
, 1);
5859 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
5862 case KVM_SET_MSRS
: {
5863 int idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
5864 r
= msr_io(vcpu
, argp
, do_set_msr
, 0);
5865 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
5868 case KVM_TPR_ACCESS_REPORTING
: {
5869 struct kvm_tpr_access_ctl tac
;
5872 if (copy_from_user(&tac
, argp
, sizeof(tac
)))
5874 r
= vcpu_ioctl_tpr_access_reporting(vcpu
, &tac
);
5878 if (copy_to_user(argp
, &tac
, sizeof(tac
)))
5883 case KVM_SET_VAPIC_ADDR
: {
5884 struct kvm_vapic_addr va
;
5888 if (!lapic_in_kernel(vcpu
))
5891 if (copy_from_user(&va
, argp
, sizeof(va
)))
5893 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
5894 r
= kvm_lapic_set_vapic_addr(vcpu
, va
.vapic_addr
);
5895 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
5898 case KVM_X86_SETUP_MCE
: {
5902 if (copy_from_user(&mcg_cap
, argp
, sizeof(mcg_cap
)))
5904 r
= kvm_vcpu_ioctl_x86_setup_mce(vcpu
, mcg_cap
);
5907 case KVM_X86_SET_MCE
: {
5908 struct kvm_x86_mce mce
;
5911 if (copy_from_user(&mce
, argp
, sizeof(mce
)))
5913 r
= kvm_vcpu_ioctl_x86_set_mce(vcpu
, &mce
);
5916 case KVM_GET_VCPU_EVENTS
: {
5917 struct kvm_vcpu_events events
;
5919 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu
, &events
);
5922 if (copy_to_user(argp
, &events
, sizeof(struct kvm_vcpu_events
)))
5927 case KVM_SET_VCPU_EVENTS
: {
5928 struct kvm_vcpu_events events
;
5931 if (copy_from_user(&events
, argp
, sizeof(struct kvm_vcpu_events
)))
5934 r
= kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu
, &events
);
5937 case KVM_GET_DEBUGREGS
: {
5938 struct kvm_debugregs dbgregs
;
5940 kvm_vcpu_ioctl_x86_get_debugregs(vcpu
, &dbgregs
);
5943 if (copy_to_user(argp
, &dbgregs
,
5944 sizeof(struct kvm_debugregs
)))
5949 case KVM_SET_DEBUGREGS
: {
5950 struct kvm_debugregs dbgregs
;
5953 if (copy_from_user(&dbgregs
, argp
,
5954 sizeof(struct kvm_debugregs
)))
5957 r
= kvm_vcpu_ioctl_x86_set_debugregs(vcpu
, &dbgregs
);
5960 case KVM_GET_XSAVE
: {
5962 if (vcpu
->arch
.guest_fpu
.uabi_size
> sizeof(struct kvm_xsave
))
5965 u
.xsave
= kzalloc(sizeof(struct kvm_xsave
), GFP_KERNEL_ACCOUNT
);
5970 kvm_vcpu_ioctl_x86_get_xsave(vcpu
, u
.xsave
);
5973 if (copy_to_user(argp
, u
.xsave
, sizeof(struct kvm_xsave
)))
5978 case KVM_SET_XSAVE
: {
5979 int size
= vcpu
->arch
.guest_fpu
.uabi_size
;
5981 u
.xsave
= memdup_user(argp
, size
);
5982 if (IS_ERR(u
.xsave
)) {
5983 r
= PTR_ERR(u
.xsave
);
5987 r
= kvm_vcpu_ioctl_x86_set_xsave(vcpu
, u
.xsave
);
5991 case KVM_GET_XSAVE2
: {
5992 int size
= vcpu
->arch
.guest_fpu
.uabi_size
;
5994 u
.xsave
= kzalloc(size
, GFP_KERNEL_ACCOUNT
);
5999 kvm_vcpu_ioctl_x86_get_xsave2(vcpu
, u
.buffer
, size
);
6002 if (copy_to_user(argp
, u
.xsave
, size
))
6009 case KVM_GET_XCRS
: {
6010 u
.xcrs
= kzalloc(sizeof(struct kvm_xcrs
), GFP_KERNEL_ACCOUNT
);
6015 kvm_vcpu_ioctl_x86_get_xcrs(vcpu
, u
.xcrs
);
6018 if (copy_to_user(argp
, u
.xcrs
,
6019 sizeof(struct kvm_xcrs
)))
6024 case KVM_SET_XCRS
: {
6025 u
.xcrs
= memdup_user(argp
, sizeof(*u
.xcrs
));
6026 if (IS_ERR(u
.xcrs
)) {
6027 r
= PTR_ERR(u
.xcrs
);
6031 r
= kvm_vcpu_ioctl_x86_set_xcrs(vcpu
, u
.xcrs
);
6034 case KVM_SET_TSC_KHZ
: {
6038 user_tsc_khz
= (u32
)arg
;
6040 if (kvm_caps
.has_tsc_control
&&
6041 user_tsc_khz
>= kvm_caps
.max_guest_tsc_khz
)
6044 if (user_tsc_khz
== 0)
6045 user_tsc_khz
= tsc_khz
;
6047 if (!kvm_set_tsc_khz(vcpu
, user_tsc_khz
))
6052 case KVM_GET_TSC_KHZ
: {
6053 r
= vcpu
->arch
.virtual_tsc_khz
;
6056 case KVM_KVMCLOCK_CTRL
: {
6057 r
= kvm_set_guest_paused(vcpu
);
6060 case KVM_ENABLE_CAP
: {
6061 struct kvm_enable_cap cap
;
6064 if (copy_from_user(&cap
, argp
, sizeof(cap
)))
6066 r
= kvm_vcpu_ioctl_enable_cap(vcpu
, &cap
);
6069 case KVM_GET_NESTED_STATE
: {
6070 struct kvm_nested_state __user
*user_kvm_nested_state
= argp
;
6074 if (!kvm_x86_ops
.nested_ops
->get_state
)
6077 BUILD_BUG_ON(sizeof(user_data_size
) != sizeof(user_kvm_nested_state
->size
));
6079 if (get_user(user_data_size
, &user_kvm_nested_state
->size
))
6082 r
= kvm_x86_ops
.nested_ops
->get_state(vcpu
, user_kvm_nested_state
,
6087 if (r
> user_data_size
) {
6088 if (put_user(r
, &user_kvm_nested_state
->size
))
6098 case KVM_SET_NESTED_STATE
: {
6099 struct kvm_nested_state __user
*user_kvm_nested_state
= argp
;
6100 struct kvm_nested_state kvm_state
;
6104 if (!kvm_x86_ops
.nested_ops
->set_state
)
6108 if (copy_from_user(&kvm_state
, user_kvm_nested_state
, sizeof(kvm_state
)))
6112 if (kvm_state
.size
< sizeof(kvm_state
))
6115 if (kvm_state
.flags
&
6116 ~(KVM_STATE_NESTED_RUN_PENDING
| KVM_STATE_NESTED_GUEST_MODE
6117 | KVM_STATE_NESTED_EVMCS
| KVM_STATE_NESTED_MTF_PENDING
6118 | KVM_STATE_NESTED_GIF_SET
))
6121 /* nested_run_pending implies guest_mode. */
6122 if ((kvm_state
.flags
& KVM_STATE_NESTED_RUN_PENDING
)
6123 && !(kvm_state
.flags
& KVM_STATE_NESTED_GUEST_MODE
))
6126 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
6127 r
= kvm_x86_ops
.nested_ops
->set_state(vcpu
, user_kvm_nested_state
, &kvm_state
);
6128 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
6131 case KVM_GET_SUPPORTED_HV_CPUID
:
6132 r
= kvm_ioctl_get_supported_hv_cpuid(vcpu
, argp
);
6134 #ifdef CONFIG_KVM_XEN
6135 case KVM_XEN_VCPU_GET_ATTR
: {
6136 struct kvm_xen_vcpu_attr xva
;
6139 if (copy_from_user(&xva
, argp
, sizeof(xva
)))
6141 r
= kvm_xen_vcpu_get_attr(vcpu
, &xva
);
6142 if (!r
&& copy_to_user(argp
, &xva
, sizeof(xva
)))
6146 case KVM_XEN_VCPU_SET_ATTR
: {
6147 struct kvm_xen_vcpu_attr xva
;
6150 if (copy_from_user(&xva
, argp
, sizeof(xva
)))
6152 r
= kvm_xen_vcpu_set_attr(vcpu
, &xva
);
6156 case KVM_GET_SREGS2
: {
6157 u
.sregs2
= kzalloc(sizeof(struct kvm_sregs2
), GFP_KERNEL
);
6161 __get_sregs2(vcpu
, u
.sregs2
);
6163 if (copy_to_user(argp
, u
.sregs2
, sizeof(struct kvm_sregs2
)))
6168 case KVM_SET_SREGS2
: {
6169 u
.sregs2
= memdup_user(argp
, sizeof(struct kvm_sregs2
));
6170 if (IS_ERR(u
.sregs2
)) {
6171 r
= PTR_ERR(u
.sregs2
);
6175 r
= __set_sregs2(vcpu
, u
.sregs2
);
6178 case KVM_HAS_DEVICE_ATTR
:
6179 case KVM_GET_DEVICE_ATTR
:
6180 case KVM_SET_DEVICE_ATTR
:
6181 r
= kvm_vcpu_ioctl_device_attr(vcpu
, ioctl
, argp
);
6193 vm_fault_t
kvm_arch_vcpu_fault(struct kvm_vcpu
*vcpu
, struct vm_fault
*vmf
)
6195 return VM_FAULT_SIGBUS
;
6198 static int kvm_vm_ioctl_set_tss_addr(struct kvm
*kvm
, unsigned long addr
)
6202 if (addr
> (unsigned int)(-3 * PAGE_SIZE
))
6204 ret
= static_call(kvm_x86_set_tss_addr
)(kvm
, addr
);
6208 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm
*kvm
,
6211 return static_call(kvm_x86_set_identity_map_addr
)(kvm
, ident_addr
);
6214 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm
*kvm
,
6215 unsigned long kvm_nr_mmu_pages
)
6217 if (kvm_nr_mmu_pages
< KVM_MIN_ALLOC_MMU_PAGES
)
6220 mutex_lock(&kvm
->slots_lock
);
6222 kvm_mmu_change_mmu_pages(kvm
, kvm_nr_mmu_pages
);
6223 kvm
->arch
.n_requested_mmu_pages
= kvm_nr_mmu_pages
;
6225 mutex_unlock(&kvm
->slots_lock
);
6229 static int kvm_vm_ioctl_get_irqchip(struct kvm
*kvm
, struct kvm_irqchip
*chip
)
6231 struct kvm_pic
*pic
= kvm
->arch
.vpic
;
6235 switch (chip
->chip_id
) {
6236 case KVM_IRQCHIP_PIC_MASTER
:
6237 memcpy(&chip
->chip
.pic
, &pic
->pics
[0],
6238 sizeof(struct kvm_pic_state
));
6240 case KVM_IRQCHIP_PIC_SLAVE
:
6241 memcpy(&chip
->chip
.pic
, &pic
->pics
[1],
6242 sizeof(struct kvm_pic_state
));
6244 case KVM_IRQCHIP_IOAPIC
:
6245 kvm_get_ioapic(kvm
, &chip
->chip
.ioapic
);
6254 static int kvm_vm_ioctl_set_irqchip(struct kvm
*kvm
, struct kvm_irqchip
*chip
)
6256 struct kvm_pic
*pic
= kvm
->arch
.vpic
;
6260 switch (chip
->chip_id
) {
6261 case KVM_IRQCHIP_PIC_MASTER
:
6262 spin_lock(&pic
->lock
);
6263 memcpy(&pic
->pics
[0], &chip
->chip
.pic
,
6264 sizeof(struct kvm_pic_state
));
6265 spin_unlock(&pic
->lock
);
6267 case KVM_IRQCHIP_PIC_SLAVE
:
6268 spin_lock(&pic
->lock
);
6269 memcpy(&pic
->pics
[1], &chip
->chip
.pic
,
6270 sizeof(struct kvm_pic_state
));
6271 spin_unlock(&pic
->lock
);
6273 case KVM_IRQCHIP_IOAPIC
:
6274 kvm_set_ioapic(kvm
, &chip
->chip
.ioapic
);
6280 kvm_pic_update_irq(pic
);
6284 static int kvm_vm_ioctl_get_pit(struct kvm
*kvm
, struct kvm_pit_state
*ps
)
6286 struct kvm_kpit_state
*kps
= &kvm
->arch
.vpit
->pit_state
;
6288 BUILD_BUG_ON(sizeof(*ps
) != sizeof(kps
->channels
));
6290 mutex_lock(&kps
->lock
);
6291 memcpy(ps
, &kps
->channels
, sizeof(*ps
));
6292 mutex_unlock(&kps
->lock
);
6296 static int kvm_vm_ioctl_set_pit(struct kvm
*kvm
, struct kvm_pit_state
*ps
)
6299 struct kvm_pit
*pit
= kvm
->arch
.vpit
;
6301 mutex_lock(&pit
->pit_state
.lock
);
6302 memcpy(&pit
->pit_state
.channels
, ps
, sizeof(*ps
));
6303 for (i
= 0; i
< 3; i
++)
6304 kvm_pit_load_count(pit
, i
, ps
->channels
[i
].count
, 0);
6305 mutex_unlock(&pit
->pit_state
.lock
);
6309 static int kvm_vm_ioctl_get_pit2(struct kvm
*kvm
, struct kvm_pit_state2
*ps
)
6311 mutex_lock(&kvm
->arch
.vpit
->pit_state
.lock
);
6312 memcpy(ps
->channels
, &kvm
->arch
.vpit
->pit_state
.channels
,
6313 sizeof(ps
->channels
));
6314 ps
->flags
= kvm
->arch
.vpit
->pit_state
.flags
;
6315 mutex_unlock(&kvm
->arch
.vpit
->pit_state
.lock
);
6316 memset(&ps
->reserved
, 0, sizeof(ps
->reserved
));
6320 static int kvm_vm_ioctl_set_pit2(struct kvm
*kvm
, struct kvm_pit_state2
*ps
)
6324 u32 prev_legacy
, cur_legacy
;
6325 struct kvm_pit
*pit
= kvm
->arch
.vpit
;
6327 mutex_lock(&pit
->pit_state
.lock
);
6328 prev_legacy
= pit
->pit_state
.flags
& KVM_PIT_FLAGS_HPET_LEGACY
;
6329 cur_legacy
= ps
->flags
& KVM_PIT_FLAGS_HPET_LEGACY
;
6330 if (!prev_legacy
&& cur_legacy
)
6332 memcpy(&pit
->pit_state
.channels
, &ps
->channels
,
6333 sizeof(pit
->pit_state
.channels
));
6334 pit
->pit_state
.flags
= ps
->flags
;
6335 for (i
= 0; i
< 3; i
++)
6336 kvm_pit_load_count(pit
, i
, pit
->pit_state
.channels
[i
].count
,
6338 mutex_unlock(&pit
->pit_state
.lock
);
6342 static int kvm_vm_ioctl_reinject(struct kvm
*kvm
,
6343 struct kvm_reinject_control
*control
)
6345 struct kvm_pit
*pit
= kvm
->arch
.vpit
;
6347 /* pit->pit_state.lock was overloaded to prevent userspace from getting
6348 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
6349 * ioctls in parallel. Use a separate lock if that ioctl isn't rare.
6351 mutex_lock(&pit
->pit_state
.lock
);
6352 kvm_pit_set_reinject(pit
, control
->pit_reinject
);
6353 mutex_unlock(&pit
->pit_state
.lock
);
6358 void kvm_arch_sync_dirty_log(struct kvm
*kvm
, struct kvm_memory_slot
*memslot
)
6362 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
6363 * before reporting dirty_bitmap to userspace. KVM flushes the buffers
6364 * on all VM-Exits, thus we only need to kick running vCPUs to force a
6367 struct kvm_vcpu
*vcpu
;
6370 if (!kvm_x86_ops
.cpu_dirty_log_size
)
6373 kvm_for_each_vcpu(i
, vcpu
, kvm
)
6374 kvm_vcpu_kick(vcpu
);
6377 int kvm_vm_ioctl_irq_line(struct kvm
*kvm
, struct kvm_irq_level
*irq_event
,
6380 if (!irqchip_in_kernel(kvm
))
6383 irq_event
->status
= kvm_set_irq(kvm
, KVM_USERSPACE_IRQ_SOURCE_ID
,
6384 irq_event
->irq
, irq_event
->level
,
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
	case KVM_CAP_DISABLE_QUIRKS2:
		if (cap->args[0] & ~KVM_X86_VALID_QUIRKS)
	case KVM_CAP_DISABLE_QUIRKS:
		kvm->arch.disabled_quirks = cap->args[0];
	case KVM_CAP_SPLIT_IRQCHIP: {
		mutex_lock(&kvm->lock);
		if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
			goto split_irqchip_unlock;
		if (irqchip_in_kernel(kvm))
			goto split_irqchip_unlock;
		if (kvm->created_vcpus)
			goto split_irqchip_unlock;
		r = kvm_setup_empty_irq_routing(kvm);
			goto split_irqchip_unlock;
		/* Pairs with irqchip_in_kernel. */
		kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
split_irqchip_unlock:
		mutex_unlock(&kvm->lock);
	case KVM_CAP_X2APIC_API:
		if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
		if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
			kvm->arch.x2apic_format = true;
		if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
			kvm->arch.x2apic_broadcast_quirk_disabled = true;
	case KVM_CAP_X86_DISABLE_EXITS:
		if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
			kvm->arch.pause_in_guest = true;

#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
		    "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."

		if (!mitigate_smt_rsb) {
			if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() &&
			    (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
				pr_warn_once(SMT_RSB_MSG);

			if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
			    kvm_can_mwait_in_guest())
				kvm->arch.mwait_in_guest = true;
			if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
				kvm->arch.hlt_in_guest = true;
			if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
				kvm->arch.cstate_in_guest = true;
	case KVM_CAP_MSR_PLATFORM_INFO:
		kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
	case KVM_CAP_EXCEPTION_PAYLOAD:
		kvm->arch.exception_payload_enabled = cap->args[0];
	case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
		kvm->arch.triple_fault_event = cap->args[0];
	case KVM_CAP_X86_USER_SPACE_MSR:
		if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK)
		kvm->arch.user_space_msr_mask = cap->args[0];
	case KVM_CAP_X86_BUS_LOCK_EXIT:
		if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE)
		if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) &&
		    (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT))
		if (kvm_caps.has_bus_lock_exit &&
		    cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)
			kvm->arch.bus_lock_detection_enabled = true;
#ifdef CONFIG_X86_SGX_KVM
	case KVM_CAP_SGX_ATTRIBUTE: {
		unsigned long allowed_attributes = 0;

		r = sgx_set_attribute(&allowed_attributes, cap->args[0]);

		/* KVM only supports the PROVISIONKEY privileged attribute. */
		if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) &&
		    !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY))
			kvm->arch.sgx_provisioning_allowed = true;
	case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
		if (!kvm_x86_ops.vm_copy_enc_context_from)
		r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]);
	case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
		if (!kvm_x86_ops.vm_move_enc_context_from)
		r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]);
	case KVM_CAP_EXIT_HYPERCALL:
		if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
		kvm->arch.hypercall_exit_enabled = cap->args[0];
	case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
		if (cap->args[0] & ~1)
		kvm->arch.exit_on_emulation_error = cap->args[0];
	case KVM_CAP_PMU_CAPABILITY:
		if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))

		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
		mutex_unlock(&kvm->lock);
	case KVM_CAP_MAX_VCPU_ID:
		if (cap->args[0] > KVM_MAX_VCPU_IDS)

		mutex_lock(&kvm->lock);
		if (kvm->arch.max_vcpu_ids == cap->args[0]) {
		} else if (!kvm->arch.max_vcpu_ids) {
			kvm->arch.max_vcpu_ids = cap->args[0];
		mutex_unlock(&kvm->lock);
	case KVM_CAP_X86_NOTIFY_VMEXIT:
		if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS)
		if (!kvm_caps.has_notify_vmexit)
		if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED))
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.notify_window = cap->args[0] >> 32;
			kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
		mutex_unlock(&kvm->lock);
	case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
		/*
		 * Since the risk of disabling NX hugepages is a guest crashing
		 * the system, ensure the userspace process has permission to
		 * reboot the system.
		 *
		 * Note that unlike the reboot() syscall, the process must have
		 * this capability in the root namespace because exposing
		 * /dev/kvm into a container does not limit the scope of the
		 * iTLB multihit bug to that container. In other words,
		 * this must use capable(), not ns_capable().
		 */
		if (!capable(CAP_SYS_BOOT)) {

		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.disable_nx_huge_pages = true;
		mutex_unlock(&kvm->lock);
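/*
 * MSR filter plumbing for KVM_X86_SET_MSR_FILTER.  A filter is allocated as a
 * single kvm_x86_msr_filter object whose per-range bitmaps are copied from
 * userspace; the active filter is swapped under kvm->lock and readers are
 * flushed via SRCU before the old filter (and its bitmaps) is freed.
 */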
static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
	struct kvm_x86_msr_filter *msr_filter;

	msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
	msr_filter->default_allow = default_allow;

static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
	for (i = 0; i < msr_filter->count; i++)
		kfree(msr_filter->ranges[i].bitmap);
static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
			      struct kvm_msr_filter_range *user_range)
	unsigned long *bitmap;

	if (!user_range->nmsrs)

	if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK)

	if (!user_range->flags)

	bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
	if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)

	bitmap = memdup_user((__user u8 *)user_range->bitmap, bitmap_size);
		return PTR_ERR(bitmap);

	msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
		.flags = user_range->flags,
		.base = user_range->base,
		.nmsrs = user_range->nmsrs,

	msr_filter->count++;
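/*
 * Illustrative only (not part of this file): a userspace VMM builds a
 * struct kvm_msr_filter and hands it to the KVM_X86_SET_MSR_FILTER VM ioctl,
 * e.g. deny-by-default with one read/write allow-list range:
 *
 *	__u8 allow[8] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *	struct kvm_msr_filter filter = {
 *		.flags = KVM_MSR_FILTER_DEFAULT_DENY,
 *		.ranges[0] = {
 *			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
 *			.base = SOME_BASE_MSR,	// hypothetical base MSR index
 *			.nmsrs = 64,		// 64 MSRs -> 8 bytes of bitmap
 *			.bitmap = allow,
 *		},
 *	};
 *	ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
 */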
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
				       struct kvm_msr_filter *filter)
	struct kvm_x86_msr_filter *new_filter, *old_filter;

	if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK)

	for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
		empty &= !filter->ranges[i].nmsrs;

	default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
	if (empty && !default_allow)

	new_filter = kvm_alloc_msr_filter(default_allow);

	for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
		r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
			kvm_free_msr_filter(new_filter);

	mutex_lock(&kvm->lock);
	old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter,
					 mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);
	synchronize_srcu(&kvm->srcu);

	kvm_free_msr_filter(old_filter);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
#ifdef CONFIG_KVM_COMPAT
/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range_compat {

struct kvm_msr_filter_compat {
	struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];

#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)

long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
	void __user *argp = (void __user *)arg;
	struct kvm *kvm = filp->private_data;

	case KVM_X86_SET_MSR_FILTER_COMPAT: {
		struct kvm_msr_filter __user *user_msr_filter = argp;
		struct kvm_msr_filter_compat filter_compat;
		struct kvm_msr_filter filter;

		if (copy_from_user(&filter_compat, user_msr_filter,
				   sizeof(filter_compat)))

		filter.flags = filter_compat.flags;
		for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
			struct kvm_msr_filter_range_compat *cr;

			cr = &filter_compat.ranges[i];
			filter.ranges[i] = (struct kvm_msr_filter_range) {
				.bitmap = (__u8 *)(ulong)cr->bitmap,

		r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_arch_suspend_notifier(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.pv_time.active)

		ret = kvm_set_guest_paused(vcpu);
			kvm_err("Failed to pause guest VCPU%d: %d\n",
				vcpu->vcpu_id, ret);
	mutex_unlock(&kvm->lock);

	return ret ? NOTIFY_BAD : NOTIFY_DONE;

int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		return kvm_arch_suspend_notifier(kvm);

#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
	struct kvm_clock_data data = { 0 };

	get_kvmclock(kvm, &data);
	if (copy_to_user(argp, &data, sizeof(data)))

static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
	struct kvm_arch *ka = &kvm->arch;
	struct kvm_clock_data data;

	if (copy_from_user(&data, argp, sizeof(data)))

	/*
	 * Only KVM_CLOCK_REALTIME is used, but allow passing the
	 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK.
	 */
	if (data.flags & ~KVM_CLOCK_VALID_FLAGS)

	kvm_hv_request_tsc_page_update(kvm);
	kvm_start_pvclock_update(kvm);
	pvclock_update_vm_gtod_copy(kvm);

	/*
	 * This pairs with kvm_guest_time_update(): when masterclock is
	 * in use, we use master_kernel_ns + kvmclock_offset to set
	 * unsigned 'system_time' so if we use get_kvmclock_ns() (which
	 * is slightly ahead) here we risk going negative on unsigned
	 * 'system_time' when 'data.clock' is very small.
	 */
	if (data.flags & KVM_CLOCK_REALTIME) {
		u64 now_real_ns = ktime_get_real_ns();

		/*
		 * Avoid stepping the kvmclock backwards.
		 */
		if (now_real_ns > data.realtime)
			data.clock += now_real_ns - data.realtime;

	if (ka->use_master_clock)
		now_raw_ns = ka->master_kernel_ns;
		now_raw_ns = get_kvmclock_base_ns();
	ka->kvmclock_offset = data.clock - now_raw_ns;
	kvm_end_pvclock_update(kvm);
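/*
 * kvm_arch_vm_ioctl() below is the x86 dispatcher for VM-scoped ioctls.
 * Illustrative only (not part of this file): from userspace these are plain
 * ioctls on the VM file descriptor, e.g.
 *
 *	struct kvm_clock_data clock;
 *	ioctl(vm_fd, KVM_GET_CLOCK, &clock);	// snapshot the current kvmclock
 *	ioctl(vm_fd, KVM_SET_CLOCK, &clock);	// restore it, e.g. after migration
 */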
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
	 */
		struct kvm_pit_state ps;
		struct kvm_pit_state2 ps2;
		struct kvm_pit_config pit_config;

	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
	case KVM_SET_IDENTITY_MAP_ADDR: {
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			goto set_identity_unlock;
		if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
			goto set_identity_unlock;
		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
set_identity_unlock:
		mutex_unlock(&kvm->lock);
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
	case KVM_CREATE_IRQCHIP: {
		mutex_lock(&kvm->lock);
		if (irqchip_in_kernel(kvm))
			goto create_irqchip_unlock;
		if (kvm->created_vcpus)
			goto create_irqchip_unlock;
		r = kvm_pic_init(kvm);
			goto create_irqchip_unlock;
		r = kvm_ioapic_init(kvm);
			kvm_pic_destroy(kvm);
			goto create_irqchip_unlock;
		r = kvm_setup_default_irq_routing(kvm);
			kvm_ioapic_destroy(kvm);
			kvm_pic_destroy(kvm);
			goto create_irqchip_unlock;
		/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
		kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
create_irqchip_unlock:
		mutex_unlock(&kvm->lock);
	case KVM_CREATE_PIT:
		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
	case KVM_CREATE_PIT2:
		if (copy_from_user(&u.pit_config, argp,
				   sizeof(struct kvm_pit_config)))
		mutex_lock(&kvm->lock);
			goto create_pit_unlock;
		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
		mutex_unlock(&kvm->lock);
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip;

		chip = memdup_user(argp, sizeof(*chip));

		if (!irqchip_kernel(kvm))
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
			goto get_irqchip_out;
		if (copy_to_user(argp, chip, sizeof(*chip)))
			goto get_irqchip_out;
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip;

		chip = memdup_user(argp, sizeof(*chip));

		if (!irqchip_kernel(kvm))
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
		if (!kvm->arch.vpit)
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
		mutex_lock(&kvm->lock);
		if (!kvm->arch.vpit)
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
		mutex_unlock(&kvm->lock);
	case KVM_GET_PIT2: {
		if (!kvm->arch.vpit)
		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
	case KVM_SET_PIT2: {
		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
		mutex_lock(&kvm->lock);
		if (!kvm->arch.vpit)
		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
		mutex_unlock(&kvm->lock);
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;

		if (copy_from_user(&control, argp, sizeof(control)))
		if (!kvm->arch.vpit)
		r = kvm_vm_ioctl_reinject(kvm, &control);
	case KVM_SET_BOOT_CPU_ID:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
		kvm->arch.bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
#ifdef CONFIG_KVM_XEN
	case KVM_XEN_HVM_CONFIG: {
		struct kvm_xen_hvm_config xhc;

		if (copy_from_user(&xhc, argp, sizeof(xhc)))
		r = kvm_xen_hvm_config(kvm, &xhc);
	case KVM_XEN_HVM_GET_ATTR: {
		struct kvm_xen_hvm_attr xha;

		if (copy_from_user(&xha, argp, sizeof(xha)))
		r = kvm_xen_hvm_get_attr(kvm, &xha);
		if (!r && copy_to_user(argp, &xha, sizeof(xha)))
	case KVM_XEN_HVM_SET_ATTR: {
		struct kvm_xen_hvm_attr xha;

		if (copy_from_user(&xha, argp, sizeof(xha)))
		r = kvm_xen_hvm_set_attr(kvm, &xha);
	case KVM_XEN_HVM_EVTCHN_SEND: {
		struct kvm_irq_routing_xen_evtchn uxe;

		if (copy_from_user(&uxe, argp, sizeof(uxe)))
		r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
		r = kvm_vm_ioctl_set_clock(kvm, argp);
		r = kvm_vm_ioctl_get_clock(kvm, argp);
	case KVM_SET_TSC_KHZ: {
		user_tsc_khz = (u32)arg;

		if (kvm_caps.has_tsc_control &&
		    user_tsc_khz >= kvm_caps.max_guest_tsc_khz)

		if (user_tsc_khz == 0)
			user_tsc_khz = tsc_khz;

		WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
	case KVM_GET_TSC_KHZ: {
		r = READ_ONCE(kvm->arch.default_tsc_khz);
	case KVM_MEMORY_ENCRYPT_OP: {
		if (!kvm_x86_ops.mem_enc_ioctl)
		r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
	case KVM_MEMORY_ENCRYPT_REG_REGION: {
		struct kvm_enc_region region;

		if (copy_from_user(&region, argp, sizeof(region)))
		if (!kvm_x86_ops.mem_enc_register_region)
		r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region);
	case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
		struct kvm_enc_region region;

		if (copy_from_user(&region, argp, sizeof(region)))
		if (!kvm_x86_ops.mem_enc_unregister_region)
		r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
	case KVM_HYPERV_EVENTFD: {
		struct kvm_hyperv_eventfd hvevfd;

		if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
		r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
	case KVM_SET_PMU_EVENT_FILTER:
		r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
	case KVM_X86_SET_MSR_FILTER: {
		struct kvm_msr_filter __user *user_msr_filter = argp;
		struct kvm_msr_filter filter;

		if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))

		r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
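/*
 * Probing helpers for the exported MSR lists: each candidate MSR is accepted
 * only if the host can actually read it and the matching CPUID feature is
 * exposed, so msrs_to_save/emulated_msrs/msr_based_features end up holding
 * exactly what KVM_GET_MSR_INDEX_LIST and KVM_GET_MSR_FEATURE_INDEX_LIST
 * report to userspace.
 */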
static void kvm_probe_feature_msr(u32 msr_index)
	struct kvm_msr_entry msr = {

	if (kvm_get_msr_feature(&msr))

	msr_based_features[num_msr_based_features++] = msr_index;

static void kvm_probe_msr_to_save(u32 msr_index)
	if (rdmsr_safe(msr_index, &dummy[0], &dummy[1]))

	/*
	 * Even MSRs that are valid in the host may not be exposed to guests in
	 */
	switch (msr_index) {
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported())
		if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
		    !kvm_cpu_cap_has(X86_FEATURE_RDPID))
	case MSR_IA32_UMWAIT_CONTROL:
		if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
	case MSR_IA32_RTIT_CTL:
	case MSR_IA32_RTIT_STATUS:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
	case MSR_IA32_RTIT_CR3_MATCH:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
		    !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
	case MSR_IA32_RTIT_OUTPUT_BASE:
	case MSR_IA32_RTIT_OUTPUT_MASK:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
		    (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
		     !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
		    (msr_index - MSR_IA32_RTIT_ADDR0_A >=
		     intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
	case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
		if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
		    kvm_pmu_cap.num_counters_gp)
	case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
		if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
		    kvm_pmu_cap.num_counters_gp)
	case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX:
		if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >=
		    kvm_pmu_cap.num_counters_fixed)
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
		if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
	case MSR_IA32_XFD_ERR:
		if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
	case MSR_IA32_TSX_CTRL:
		if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))

	msrs_to_save[num_msrs_to_save++] = msr_index;
static void kvm_init_msr_lists(void)
	BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
			 "Please update the fixed PMCs in msrs_to_save_pmu[]");

	num_msrs_to_save = 0;
	num_emulated_msrs = 0;
	num_msr_based_features = 0;

	for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
		kvm_probe_msr_to_save(msrs_to_save_base[i]);

	for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
		kvm_probe_msr_to_save(msrs_to_save_pmu[i]);

	for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
		if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))

		emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];

	for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++)
		kvm_probe_feature_msr(i);

	for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++)
		kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]);
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
	if (!(lapic_in_kernel(vcpu) &&
	      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
	    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))

static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
	if (!(lapic_in_kernel(vcpu) &&
	      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
	    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
	trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
void kvm_set_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
	static_call(kvm_x86_set_segment)(vcpu, var, seg);

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
	static_call(kvm_x86_get_segment)(vcpu, var, seg);

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception)
	struct kvm_mmu *mmu = vcpu->arch.mmu;

	BUG_ON(!mmu_is_nested(vcpu));

	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
	t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);

gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);

gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_WRITE_MASK;
	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);

/* uses this to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
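/*
 * The *_guest_virt_helper() routines below copy guest-virtual ranges one page
 * at a time: each iteration translates the current GVA, then copies
 * min(bytes, PAGE_SIZE - offset) so a chunk never crosses a page boundary.
 * For example, a 5000-byte read starting at offset 0xf00 of a 4 KiB page is
 * split into 256 + 4096 + 648 bytes across three translations.
 */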
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u64 access,
				      struct x86_exception *exception)
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	int r = X86EMUL_CONTINUE;

		gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);

		if (gpa == INVALID_GPA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
			r = X86EMUL_IO_NEEDED;
/* used for instruction fetching */
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;

	/* Inline kvm_read_guest_virt_helper for speed.  */
	gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
	if (unlikely(gpa == INVALID_GPA))
		return X86EMUL_PROPAGATE_FAULT;

	offset = addr & (PAGE_SIZE-1);
	if (WARN_ON(offset + bytes > PAGE_SIZE))
		bytes = (unsigned)PAGE_SIZE - offset;
	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
	if (unlikely(ret < 0))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception)
	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;

	/*
	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
	 * is returned, but our callers are not ready for that and they blindly
	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
	 * uninitialized kernel stack memory into cr2 and error code.
	 */
	memset(exception, 0, sizeof(*exception));
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
			     struct x86_exception *exception, bool system)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

		access |= PFERR_IMPLICIT_ACCESS;
	else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);

static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				       struct kvm_vcpu *vcpu, u64 access,
				       struct x86_exception *exception)
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	int r = X86EMUL_CONTINUE;

		gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);

		if (gpa == INVALID_GPA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
			r = X86EMUL_IO_NEEDED;

static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
			      unsigned int bytes, struct x86_exception *exception,
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u64 access = PFERR_WRITE_MASK;

		access |= PFERR_IMPLICIT_ACCESS;
	else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
				unsigned int bytes, struct x86_exception *exception)
	/* kvm_write_guest_virt_system can pull in tons of pages. */
	vcpu->arch.l1tf_flush_l1d = true;

	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   PFERR_WRITE_MASK, exception);
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
				void *insn, int insn_len)
	return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type,

int handle_ud(struct kvm_vcpu *vcpu)
	static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
	int fep_flags = READ_ONCE(force_emulation_prefix);
	int emul_type = EMULTYPE_TRAP_UD;
	char sig[5]; /* ud2; .ascii "kvm" */
	struct x86_exception e;

	if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))

	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
				sig, sizeof(sig), &e) == 0 &&
	    memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
		if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
			kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
		emul_type = EMULTYPE_TRAP_UD_FORCED;

	return kvm_emulate_instruction(vcpu, emul_type);
EXPORT_SYMBOL_GPL(handle_ud);
static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			    gpa_t gpa, bool write)
	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)

	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
		trace_vcpu_match_mmio(gva, gpa, write, true);

static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
		| (write ? PFERR_WRITE_MASK : 0);

	/*
	 * currently PKRU is only applied to ept enabled guest so
	 * there is no pkey in EPT page table for L1 guest or EPT
	 * shadow page table for L2 guest.
	 */
	if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
	    !permission_fault(vcpu, vcpu->arch.walk_mmu,
			      vcpu->arch.mmio_access, 0, access))) {
		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
			(gva & (PAGE_SIZE - 1));
		trace_vcpu_match_mmio(gva, *gpa, write, false);

	*gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);

	if (*gpa == INVALID_GPA)

	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
	kvm_page_track_write(vcpu, gpa, val, bytes);

struct read_write_emulator_ops {
	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
				  void *val, int bytes);
	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
			       int bytes, void *val);
	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
				    void *val, int bytes);

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
	if (vcpu->mmio_read_completed) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
			       vcpu->mmio_fragments[0].gpa, val);
		vcpu->mmio_read_completed = 0;

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
	return emulator_write_phys(vcpu, gpa, val, bytes);

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
	return vcpu_mmio_write(vcpu, gpa, bytes, val);

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
	return X86EMUL_IO_NEEDED;

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];

	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	return X86EMUL_CONTINUE;

static const struct read_write_emulator_ops read_emultor = {
	.read_write_prepare = read_prepare,
	.read_write_emulate = read_emulate,
	.read_write_mmio = vcpu_mmio_read,
	.read_write_exit_mmio = read_exit_mmio,

static const struct read_write_emulator_ops write_emultor = {
	.read_write_emulate = write_emulate,
	.read_write_mmio = write_mmio,
	.read_write_exit_mmio = write_exit_mmio,
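/*
 * read_emultor/write_emultor let emulator_read_write() drive reads and writes
 * through one code path: read_write_emulate() tries ordinary guest memory
 * first, read_write_mmio() tries in-kernel MMIO devices, and
 * read_write_exit_mmio() queues whatever is left as a KVM_EXIT_MMIO fragment
 * for userspace to complete.
 */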
static int emulator_read_write_onepage(unsigned long addr, void *val,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
				       const struct read_write_emulator_ops *ops)
	bool write = ops->write;
	struct kvm_mmio_fragment *frag;
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	/*
	 * If the exit was due to a NPF we may already have a GPA.
	 * If the GPA is present, use it to avoid the GVA to GPA table walk.
	 * Note, this cannot be used on string operations since string
	 * operation using rep will only have the initial GPA from the NPF
	 */
	if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
	    (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
		gpa = ctxt->gpa_val;
		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
			return X86EMUL_PROPAGATE_FAULT;

	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
	if (handled == bytes)
		return X86EMUL_CONTINUE;

	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];

	return X86EMUL_CONTINUE;
static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
			       void *val, unsigned int bytes,
			       struct x86_exception *exception,
			       const struct read_write_emulator_ops *ops)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	if (ops->read_write_prepare &&
	    ops->read_write_prepare(vcpu, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_nr_fragments = 0;

	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		now = -addr & ~PAGE_MASK;
		rc = emulator_read_write_onepage(addr, val, now, exception,
		if (rc != X86EMUL_CONTINUE)
		if (ctxt->mode != X86EMUL_MODE_PROT64)

	rc = emulator_read_write_onepage(addr, val, bytes, exception,
	if (rc != X86EMUL_CONTINUE)

	if (!vcpu->mmio_nr_fragments)

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  struct x86_exception *exception)
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);

static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
				   struct x86_exception *exception)
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
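/*
 * emulator_cmpxchg_emulated() below performs the guest's cmpxchg against the
 * host userspace mapping of the GPA so the operation stays atomic, and it
 * deliberately falls back to a plain emulated write when that is impossible
 * (MMIO, or a cross cache-line access with split-lock detection enabled).
 * The page_line_mask trick: with SLD on, the access must stay within one
 * cache line, e.g. a 64-byte line gives page_line_mask = ~63UL.
 */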
#define emulator_try_cmpxchg_user(t, ptr, old, new) \
	(__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))

static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     struct x86_exception *exception)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	/* guests cmpxchg8b have to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);

	if (gpa == INVALID_GPA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)

	/*
	 * Emulate the atomic as a straight write to avoid #AC if SLD is
	 * enabled in the host and the access splits a cache line.
	 */
	if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		page_line_mask = ~(cache_line_size() - 1);
		page_line_mask = PAGE_MASK;

	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))

	hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))

	hva += offset_in_page(gpa);

		r = emulator_try_cmpxchg_user(u8, hva, old, new);
		r = emulator_try_cmpxchg_user(u16, hva, old, new);
		r = emulator_try_cmpxchg_user(u32, hva, old, new);
		r = emulator_try_cmpxchg_user(u64, hva, old, new);

		return X86EMUL_UNHANDLEABLE;
		return X86EMUL_CMPXCHG_FAILED;

	kvm_page_track_write(vcpu, gpa, new, bytes);

	return X86EMUL_CONTINUE;

	pr_warn_once("emulating exchange as write\n");

	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
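/*
 * Port I/O that cannot be completed by an in-kernel device is bounced to
 * userspace: the data is staged in the shared pio_data page and the run
 * structure is filled with a KVM_EXIT_IO record (direction, size, port,
 * count, data_offset), which userspace consumes before re-entering the vCPU.
 */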
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *data,
			       unsigned int count, bool in)
	WARN_ON_ONCE(vcpu->arch.pio.count);
	for (i = 0; i < count; i++) {
			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data);
			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data);

			/*
			 * Userspace must have unregistered the device while PIO
			 * was running.  Drop writes / read as 0.
			 */
			memset(data, 0, size * (count - i));

	vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.count = count;
	vcpu->arch.pio.size = size;

		memset(vcpu->arch.pio_data, 0, size * count);
		memcpy(vcpu->arch.pio_data, data, size * count);

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
			   unsigned short port, void *val, unsigned int count)
	int r = emulator_pio_in_out(vcpu, size, port, val, count, true);
		trace_kvm_pio(KVM_PIO_IN, port, size, count, val);

static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
	int size = vcpu->arch.pio.size;
	unsigned int count = vcpu->arch.pio.count;
	memcpy(val, vcpu->arch.pio_data, size * count);
	trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
	vcpu->arch.pio.count = 0;

static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	if (vcpu->arch.pio.count) {
		/*
		 * Complete a previous iteration that required userspace I/O.
		 * Note, @count isn't guaranteed to match pio.count as userspace
		 * can modify ECX before rerunning the vCPU.  Ignore any such
		 * shenanigans as KVM doesn't support modifying the rep count,
		 * and the emulator ensures @count doesn't overflow the buffer.
		 */
		complete_emulator_pio_in(vcpu, val);

	return emulator_pio_in(vcpu, size, port, val, count);

static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port, const void *val,
	trace_kvm_pio(KVM_PIO_OUT, port, size, count, val);
	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);

static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
	return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
	return static_call(kvm_x86_get_segment_base)(vcpu, seg);

static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);

static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
	if (!need_emulate_wbinvd(vcpu))
		return X86EMUL_CONTINUE;

	if (static_call(kvm_x86_has_wbinvd_exit)()) {
		int cpu = get_cpu();

		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
				 wbinvd_ipi, NULL, 1);
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
	return X86EMUL_CONTINUE;

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
	kvm_emulate_wbinvd_noskip(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));

static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
			    unsigned long *dest)
	kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);

static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long value)
	return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;

static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long value;

		value = kvm_read_cr0(vcpu);
		value = vcpu->arch.cr2;
		value = kvm_read_cr3(vcpu);
		value = kvm_read_cr4(vcpu);
		value = kvm_get_cr8(vcpu);
		kvm_err("%s: unexpected cr %u\n", __func__, cr);

static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		vcpu->arch.cr2 = val;
		res = kvm_set_cr3(vcpu, val);
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		res = kvm_set_cr8(vcpu, val);
		kvm_err("%s: unexpected cr %u\n", __func__, cr);

static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
	return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt));
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
	static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt);

static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
	static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt);

static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
	static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt);

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
	static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt);

static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
	return get_segment_base(emul_to_vcpu(ctxt), seg);

static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
	struct kvm_segment var;

	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
	*selector = var.selector;

		memset(desc, 0, sizeof(*desc));

	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
#ifdef CONFIG_X86_64
	*base3 = var.base >> 32;
	desc->type = var.type;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;

static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	struct kvm_segment var;

	var.selector = selector;
	var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
	var.base |= ((u64)base3) << 32;
	var.limit = get_desc_limit(desc);
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;

	kvm_set_segment(vcpu, &var, seg);
static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
					u32 msr_index, u64 *pdata)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
		return X86EMUL_UNHANDLEABLE;

		if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
				       complete_emulated_rdmsr, r))
			return X86EMUL_IO_NEEDED;

		trace_kvm_msr_read_ex(msr_index);
		return X86EMUL_PROPAGATE_FAULT;

	trace_kvm_msr_read(msr_index, *pdata);
	return X86EMUL_CONTINUE;

static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
					u32 msr_index, u64 data)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	r = kvm_set_msr_with_filter(vcpu, msr_index, data);
		return X86EMUL_UNHANDLEABLE;

		if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
				       complete_emulated_msr_access, r))
			return X86EMUL_IO_NEEDED;

		trace_kvm_msr_write_ex(msr_index, data);
		return X86EMUL_PROPAGATE_FAULT;

	trace_kvm_msr_write(msr_index, data);
	return X86EMUL_CONTINUE;
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);

static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
	if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc))

static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);

static void emulator_halt(struct x86_emulate_ctxt *ctxt)
	emul_to_vcpu(ctxt)->arch.halt_request = 1;

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
	return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage,

static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);

static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);

static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);

static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
	return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
	kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);

static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
	static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);

static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
	return is_smm(emul_to_vcpu(ctxt));

static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
	return is_guest_mode(emul_to_vcpu(ctxt));

#ifndef CONFIG_KVM_SMM
static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
	return X86EMUL_UNHANDLEABLE;

static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));

static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
	return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);

static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
	struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;

	if (!kvm->vm_bugged)
= {
8434 .vm_bugged
= emulator_vm_bugged
,
8435 .read_gpr
= emulator_read_gpr
,
8436 .write_gpr
= emulator_write_gpr
,
8437 .read_std
= emulator_read_std
,
8438 .write_std
= emulator_write_std
,
8439 .fetch
= kvm_fetch_guest_virt
,
8440 .read_emulated
= emulator_read_emulated
,
8441 .write_emulated
= emulator_write_emulated
,
8442 .cmpxchg_emulated
= emulator_cmpxchg_emulated
,
8443 .invlpg
= emulator_invlpg
,
8444 .pio_in_emulated
= emulator_pio_in_emulated
,
8445 .pio_out_emulated
= emulator_pio_out_emulated
,
8446 .get_segment
= emulator_get_segment
,
8447 .set_segment
= emulator_set_segment
,
8448 .get_cached_segment_base
= emulator_get_cached_segment_base
,
8449 .get_gdt
= emulator_get_gdt
,
8450 .get_idt
= emulator_get_idt
,
8451 .set_gdt
= emulator_set_gdt
,
8452 .set_idt
= emulator_set_idt
,
8453 .get_cr
= emulator_get_cr
,
8454 .set_cr
= emulator_set_cr
,
8455 .cpl
= emulator_get_cpl
,
8456 .get_dr
= emulator_get_dr
,
8457 .set_dr
= emulator_set_dr
,
8458 .set_msr_with_filter
= emulator_set_msr_with_filter
,
8459 .get_msr_with_filter
= emulator_get_msr_with_filter
,
8460 .get_msr
= emulator_get_msr
,
8461 .check_pmc
= emulator_check_pmc
,
8462 .read_pmc
= emulator_read_pmc
,
8463 .halt
= emulator_halt
,
8464 .wbinvd
= emulator_wbinvd
,
8465 .fix_hypercall
= emulator_fix_hypercall
,
8466 .intercept
= emulator_intercept
,
8467 .get_cpuid
= emulator_get_cpuid
,
8468 .guest_has_movbe
= emulator_guest_has_movbe
,
8469 .guest_has_fxsr
= emulator_guest_has_fxsr
,
8470 .guest_has_rdpid
= emulator_guest_has_rdpid
,
8471 .set_nmi_mask
= emulator_set_nmi_mask
,
8472 .is_smm
= emulator_is_smm
,
8473 .is_guest_mode
= emulator_is_guest_mode
,
8474 .leave_smm
= emulator_leave_smm
,
8475 .triple_fault
= emulator_triple_fault
,
8476 .set_xcr
= emulator_set_xcr
,
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
	u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);

	/*
	 * an sti; sti; sequence only disable interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss
	 */
	if (int_shadow & mask)
	if (unlikely(int_shadow || mask)) {
		static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask);
			kvm_make_request(KVM_REQ_EVENT, vcpu);

static void inject_emulated_exception(struct kvm_vcpu *vcpu)
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	if (ctxt->exception.vector == PF_VECTOR)
		kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
	else if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
		kvm_queue_exception(vcpu, ctxt->exception.vector);

static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
	struct x86_emulate_ctxt *ctxt;

	ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
		pr_err("failed to allocate vcpu's emulator\n");

	ctxt->ops = &emulate_ops;
	vcpu->arch.emulate_ctxt = ctxt;
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);

	ctxt->gpa_available = false;
	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;

	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
	ctxt->interruptibility = 0;
	ctxt->have_exception = false;
	ctxt->exception.vector = -1;
	ctxt->perm_ok = false;

	init_decode_cache(ctxt);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
*vcpu
, int irq
, int inc_eip
)
8556 struct x86_emulate_ctxt
*ctxt
= vcpu
->arch
.emulate_ctxt
;
8559 init_emulate_ctxt(vcpu
);
8563 ctxt
->_eip
= ctxt
->eip
+ inc_eip
;
8564 ret
= emulate_int_real(ctxt
, irq
);
8566 if (ret
!= X86EMUL_CONTINUE
) {
8567 kvm_make_request(KVM_REQ_TRIPLE_FAULT
, vcpu
);
8569 ctxt
->eip
= ctxt
->_eip
;
8570 kvm_rip_write(vcpu
, ctxt
->eip
);
8571 kvm_set_rflags(vcpu
, ctxt
->eflags
);
8574 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt
);
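/*
 * When an emulation failure is reported to userspace, the exit is encoded as
 * KVM_EXIT_INTERNAL_ERROR with suberror KVM_INTERNAL_ERROR_EMULATION; the
 * helpers below pack the vendor exit info, optional caller-supplied data and,
 * when available, the raw instruction bytes into kvm_run.emulation_failure so
 * a VMM can log exactly what could not be emulated.
 */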
static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					   u8 ndata, u8 *insn_bytes, u8 insn_size)
	struct kvm_run *run = vcpu->run;

	/*
	 * Zero the whole array used to retrieve the exit info, as casting to
	 * u32 for select entries will leave some chunks uninitialized.
	 */
	memset(&info, 0, sizeof(info));

	static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1],
					   &info[2], (u32 *)&info[3],

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;

	/*
	 * There's currently space for 13 entries, but 5 are used for the exit
	 * reason and info. Restrict to 4 to reduce the maintenance burden
	 * when expanding kvm_run.emulation_failure in the future.
	 */
	if (WARN_ON_ONCE(ndata > 4))

	/* Always include the flags as a 'data' entry. */
	run->emulation_failure.flags = 0;

	BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) +
		      sizeof(run->emulation_failure.insn_bytes) != 16));

		run->emulation_failure.flags |=
			KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
		run->emulation_failure.insn_size = insn_size;
		memset(run->emulation_failure.insn_bytes, 0x90,
		       sizeof(run->emulation_failure.insn_bytes));
		memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size);

	memcpy(&run->internal.data[info_start], info, sizeof(info));
	memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
	       ndata * sizeof(data[0]));

	run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata;

static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
				       ctxt->fetch.end - ctxt->fetch.data);

void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
	prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);

void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
	__kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
	struct kvm *kvm = vcpu->kvm;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);

	if (emulation_type & EMULTYPE_VMWARE_GP) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);

	if (kvm->arch.exit_on_emulation_error ||
	    (emulation_type & EMULTYPE_SKIP)) {
		prepare_emulation_ctxt_failure_exit(vcpu);

	kvm_queue_exception(vcpu, UD_VECTOR);

	if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) {
		prepare_emulation_ctxt_failure_exit(vcpu);
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
	gpa_t gpa = cr2_or_gpa;

	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))

	if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))

	if (!vcpu->arch.mmu->root_role.direct) {
		/*
		 * Write permission should be allowed since only
		 * write access need to be emulated.
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

		/*
		 * If the mapping is invalid in guest, let cpu retry
		 * it to generate fault.
		 */
		if (gpa == INVALID_GPA)

	/*
	 * Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will goto a infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the instruction failed on the error pfn, it can not be fixed,
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
	if (vcpu->arch.mmu->root_role.direct) {
		unsigned int indirect_shadow_pages;

		write_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		write_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * if emulation was due to access to shadowed page table
	 * and it failed try to unshadow page and re-enter the
	 * guest to let CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the access faults on its page table, it can not
	 * be fixed by unprotecting shadow page and it should
	 * be reported to userspace.
	 */
	return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP);
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      gpa_t cr2_or_gpa, int emulation_type)
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and it is non-page_table
	 * writing instruction, it means the VM-EXIT is caused by shadow
	 * page protected, we can zap the shadow page and retry this
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop. So, we cache the
	 * last retried eip and the last fault address, if we meet the eip
	 * and the address again, we can break out of the potential infinite
	 */
	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))

	if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))

	if (x86_page_table_writing_insn(ctxt))

	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2_or_gpa;

	if (!vcpu->arch.mmu->root_role.direct)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8798 static int complete_emulated_mmio(struct kvm_vcpu
*vcpu
);
8799 static int complete_emulated_pio(struct kvm_vcpu
*vcpu
);
8801 static int kvm_vcpu_check_hw_bp(unsigned long addr
, u32 type
, u32 dr7
,
8810 for (i
= 0; i
< 4; i
++, enable
>>= 2, rwlen
>>= 4)
8811 if ((enable
& 3) && (rwlen
& 15) == type
&& db
[i
] == addr
)
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
		kvm_run->debug.arch.exception = DB_VECTOR;
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		return 0;
	}
	kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
	return 1;
}
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
	int r;

	r = static_call(kvm_x86_skip_emulated_instruction)(vcpu);
	if (unlikely(!r))
		return 0;

	kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);

	/*
	 * rflags is the old, "raw" value of the flags.  The new value has
	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	if (unlikely(rflags & X86_EFLAGS_TF))
		r = kvm_vcpu_do_singlestep(vcpu);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
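
/*
 * Illustrative sketch (hypothetical handler, not from this file): a vendor
 * exit handler that fully emulates an intercepted instruction in-kernel is
 * expected to advance RIP via kvm_skip_emulated_instruction() so that the
 * RFLAGS.TF / KVM_GUESTDBG_SINGLESTEP handling above is applied:
 *
 *	static int handle_some_intercept(struct kvm_vcpu *vcpu)
 *	{
 *		// ... vendor-specific emulation work ...
 *		return kvm_skip_emulated_instruction(vcpu);
 *	}
 *
 * The return value follows the usual convention: 0 means exit to userspace
 * (e.g. a single-step KVM_EXIT_DEBUG was set up), non-zero means resume the
 * guest.
 */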
static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
{
	u32 shadow;

	if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF)
		return true;

	/*
	 * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active,
	 * but AMD CPUs do not.  MOV/POP SS blocking is rare, check that first
	 * to avoid the relatively expensive CPUID lookup.
	 */
	shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
	return (shadow & KVM_X86_SHADOW_INT_MOV_SS) &&
	       guest_cpuid_is_intel(vcpu);
}
static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
					   int emulation_type, int *r)
{
	WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE);

	/*
	 * Do not check for code breakpoints if hardware has already done the
	 * checks, as inferred from the emulation type.  On NO_DECODE and SKIP,
	 * the instruction has passed all exception checks, and all intercepted
	 * exceptions that trigger emulation have lower priority than code
	 * breakpoints, i.e. the fact that the intercepted exception occurred
	 * means any code breakpoints have already been serviced.
	 *
	 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
	 * hardware has checked the RIP of the magic prefix, but not the RIP of
	 * the instruction being emulated.  The intent of forced emulation is
	 * to behave as if KVM intercepted the instruction without an exception
	 * and without a prefix.
	 */
	if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
			      EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
		return false;

	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
		struct kvm_run *kvm_run = vcpu->run;
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.guest_debug_dr7,
					   vcpu->arch.eff_db);

		if (dr6 != 0) {
			kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
			kvm_run->debug.arch.pc = eip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = 0;
			return true;
		}
	}

	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !kvm_is_code_breakpoint_inhibited(vcpu)) {
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.dr7,
					   vcpu->arch.db);

		if (dr6 != 0) {
			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
			*r = 1;
			return true;
		}
	}

	return false;
}
static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->opcode_len) {
	case 1:
		switch (ctxt->b) {
		case 0xe4:	/* IN */
		case 0xe5:
		case 0xec:
		case 0xed:
		case 0xe6:	/* OUT */
		case 0xe7:
		case 0xee:
		case 0xef:
		case 0x6c:	/* INS */
		case 0x6d:
		case 0x6e:	/* OUTS */
		case 0x6f:
			return true;
		}
		break;
	case 2:
		switch (ctxt->b) {
		case 0x33:	/* RDPMC */
			return true;
		}
		break;
	}

	return false;
}
/*
 * Decode an instruction for emulation.  The caller is responsible for handling
 * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
 * (and wrong) when emulating on an intercepted fault-like exception[*], as
 * code breakpoints have higher priority and thus have already been done by
 * hardware.
 *
 * [*] Except #MC, which is higher priority, but KVM should never emulate in
 *     response to a machine check.
 */
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int r;

	init_emulate_ctxt(vcpu);

	r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);

	trace_kvm_emulate_insn_start(vcpu);
	++vcpu->stat.insn_emulation;

	return r;
}
EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len)
{
	int r;
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	bool writeback = true;

	if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
		return 1;

	vcpu->arch.l1tf_flush_l1d = true;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		kvm_clear_exception_queue(vcpu);

		/*
		 * Return immediately if RIP hits a code breakpoint, such #DBs
		 * are fault-like and are higher priority than any faults on
		 * the code fetch itself.
		 */
		if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
			return r;

		r = x86_decode_emulated_instruction(vcpu, emulation_type,
						    insn, insn_len);
		if (r != EMULATION_OK) {
			if ((emulation_type & EMULTYPE_TRAP_UD) ||
			    (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
				kvm_queue_exception(vcpu, UD_VECTOR);
				return 1;
			}
			if (reexecute_instruction(vcpu, cr2_or_gpa,
						  emulation_type))
				return 1;

			if (ctxt->have_exception &&
			    !(emulation_type & EMULTYPE_SKIP)) {
				/*
				 * #UD should result in just EMULATION_FAILED, and trap-like
				 * exception should not be encountered during decode.
				 */
				WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
					     exception_type(ctxt->exception.vector) == EXCPT_TRAP);
				inject_emulated_exception(vcpu);
				return 1;
			}
			return handle_emulation_failure(vcpu, emulation_type);
		}
	}

	if ((emulation_type & EMULTYPE_VMWARE_GP) &&
	    !is_vmware_backdoor_opcode(ctxt)) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}

	/*
	 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
	 * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
	 * The caller is responsible for updating interruptibility state and
	 * injecting single-step #DBs.
	 */
	if (emulation_type & EMULTYPE_SKIP) {
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			ctxt->eip = (u32)ctxt->_eip;
		else
			ctxt->eip = ctxt->_eip;

		if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
			r = 1;
			goto writeback;
		}

		kvm_rip_write(vcpu, ctxt->eip);
		if (ctxt->eflags & X86_EFLAGS_RF)
			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
		return 1;
	}

	if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
		return 1;

	/* this is needed for vmware backdoor interface to work since it
	   changes register values during the IO operation */
	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
		emulator_invalidate_register_cache(ctxt);
	}

restart:
	if (emulation_type & EMULTYPE_PF) {
		/* Save the faulting GPA (cr2) in the address field */
		ctxt->exception.address = cr2_or_gpa;

		/* With shadow page tables, cr2 contains a GVA or nGPA. */
		if (vcpu->arch.mmu->root_role.direct) {
			ctxt->gpa_available = true;
			ctxt->gpa_val = cr2_or_gpa;
		}
	} else {
		/* Sanitize the address out of an abundance of paranoia. */
		ctxt->exception.address = 0;
	}

	r = x86_emulate_insn(ctxt);

	if (r == EMULATION_INTERCEPTED)
		return 1;

	if (r == EMULATION_FAILED) {
		if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type))
			return 1;

		return handle_emulation_failure(vcpu, emulation_type);
	}

	if (ctxt->have_exception) {
		WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
		vcpu->mmio_needed = false;
		r = 1;
		inject_emulated_exception(vcpu);
	} else if (vcpu->arch.pio.count) {
		if (!vcpu->arch.pio.in) {
			/* FIXME: return into emulator if single-stepping.  */
			vcpu->arch.pio.count = 0;
		} else {
			writeback = false;
			vcpu->arch.complete_userspace_io = complete_emulated_pio;
		}
		r = 0;
	} else if (vcpu->mmio_needed) {
		++vcpu->stat.mmio_exits;

		if (!vcpu->mmio_is_write)
			writeback = false;
		r = 0;
		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	} else if (vcpu->arch.complete_userspace_io) {
		writeback = false;
		r = 0;
	} else if (r == EMULATION_RESTART)
		goto restart;
	else
		r = 1;

writeback:
	if (writeback) {
		unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
		toggle_interruptibility(vcpu, ctxt->interruptibility);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

		/*
		 * Note, EXCPT_DB is assumed to be fault-like as the emulator
		 * only supports code breakpoints and general detect #DB, both
		 * of which are fault-like.
		 */
		if (!ctxt->have_exception ||
		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
			kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
			if (ctxt->is_branch)
				kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
			kvm_rip_write(vcpu, ctxt->eip);
			if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
				r = kvm_vcpu_do_singlestep(vcpu);
			static_call_cond(kvm_x86_update_emulated_instruction)(vcpu);
			__kvm_set_rflags(vcpu, ctxt->eflags);
		}

		/*
		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
		 * do nothing, and it will be requested again as soon as
		 * the shadow expires.  But we still need to check here,
		 * because POPF has no interrupt shadow.
		 */
		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else
		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;

	return r;
}
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction);

int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len)
{
	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
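
/*
 * Illustrative usage (hypothetical caller, not from this file): vendor code
 * that cannot decode the faulting instruction itself re-enters the common
 * emulator with no pre-decoded bytes, e.g.
 *
 *	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
 *
 * whereas a caller that already fetched the instruction bytes (e.g. from a
 * decode-assist buffer) hands them in via
 * kvm_emulate_instruction_from_buffer().  Both funnel into
 * x86_emulate_instruction() with cr2_or_gpa == 0.
 */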
static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pio.count = 0;
	return 1;
}

static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pio.count = 0;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
		return 1;

	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port)
{
	unsigned long val = kvm_rax_read(vcpu);
	int ret = emulator_pio_out(vcpu, size, port, &val, 1);

	if (ret)
		return ret;

	/*
	 * Workaround userspace that relies on old KVM behavior of %rip being
	 * incremented prior to exiting to userspace to handle "OUT 0x7e".
	 */
	if (port == 0x7e &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
		vcpu->arch.complete_userspace_io =
			complete_fast_pio_out_port_0x7e;
		kvm_skip_emulated_instruction(vcpu);
	} else {
		vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
		vcpu->arch.complete_userspace_io = complete_fast_pio_out;
	}
	return 0;
}

static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	/* We should only ever be called with arch.pio.count equal to 1 */
	BUG_ON(vcpu->arch.pio.count != 1);

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
		vcpu->arch.pio.count = 0;
		return 1;
	}

	/* For size less than 4 we merge, else we zero extend */
	val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;

	complete_emulator_pio_in(vcpu, &val);
	kvm_rax_write(vcpu, val);

	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
			   unsigned short port)
{
	unsigned long val;
	int ret;

	/* For size less than 4 we merge, else we zero extend */
	val = (size < 4) ? kvm_rax_read(vcpu) : 0;

	ret = emulator_pio_in(vcpu, size, port, &val, 1);
	if (ret) {
		kvm_rax_write(vcpu, val);
		return ret;
	}

	vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io = complete_fast_pio_in;

	return 0;
}

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{
	int ret;

	if (in)
		ret = kvm_fast_pio_in(vcpu, size, port);
	else
		ret = kvm_fast_pio_out(vcpu, size, port);
	return ret && kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_fast_pio);
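
/*
 * Illustrative usage (hypothetical, not from this file): a vendor I/O-exit
 * handler decodes the port, operand size and direction from exit info and
 * defers to the common fast path, e.g.
 *
 *	return kvm_fast_pio(vcpu, size, port, in);
 *
 * A zero return means the access needs userspace: the PIO exit has been set
 * up and complete_fast_pio_{in,out} will finish the access and skip the
 * instruction on the next KVM_RUN.  A non-zero return means the access was
 * completed in-kernel and the instruction has already been skipped.
 */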
static int kvmclock_cpu_down_prep(unsigned int cpu)
{
	__this_cpu_write(cpu_tsc_khz, 0);
	return 0;
}

static void tsc_khz_changed(void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long khz;

	WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC));

	if (data)
		khz = freq->new;
	else
		khz = cpufreq_quick_get(raw_smp_processor_id());
	if (!khz)
		khz = tsc_khz;
	__this_cpu_write(cpu_tsc_khz, khz);
}
#ifdef CONFIG_X86_64
static void kvm_hyperv_tsc_notifier(void)
{
	struct kvm *kvm;
	int cpu;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_make_mclock_inprogress_request(kvm);

	/* no guest entries from this point */
	hyperv_stop_tsc_emulation();

	/* TSC frequency always matches when on Hyper-V */
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		for_each_present_cpu(cpu)
			per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
	}
	kvm_caps.max_guest_tsc_khz = tsc_khz;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		__kvm_start_pvclock_update(kvm);
		pvclock_update_vm_gtod_copy(kvm);
		kvm_end_pvclock_update(kvm);
	}

	mutex_unlock(&kvm_lock);
}
#endif
static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int send_ipi = 0;
	unsigned long i;

	/*
	 * We allow guests to temporarily run on slowing clocks,
	 * provided we notify them after, or to run on accelerating
	 * clocks, provided we notify them before.  Thus time never
	 * goes backwards.
	 *
	 * However, we have a problem.  We can't atomically update
	 * the frequency of a given CPU from this function; it is
	 * merely a notifier, which can be called from any CPU.
	 * Changing the TSC frequency at arbitrary points in time
	 * requires a recomputation of local variables related to
	 * the TSC for each VCPU.  We must flag these local variables
	 * to be updated and be sure the update takes place with the
	 * new frequency before any guests proceed.
	 *
	 * Unfortunately, the combination of hotplug CPU and frequency
	 * change creates an intractable locking scenario; the order
	 * of when these callouts happen is undefined with respect to
	 * CPU hotplug, and they can race with each other.  As such,
	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
	 * undefined; you can actually have a CPU frequency change take
	 * place in between the computation of X and the setting of the
	 * variable.  To protect against this problem, all updates of
	 * the per_cpu tsc_khz variable are done in an interrupt
	 * protected IPI, and all callers wishing to update the value
	 * must wait for a synchronous IPI to complete (which is trivial
	 * if the caller is on the CPU already).  This establishes the
	 * necessary total order on variable updates.
	 *
	 * Note that because a guest time update may take place
	 * anytime after the setting of the VCPU's request bit, the
	 * correct TSC value must be set before the request.  However,
	 * to ensure the update actually makes it to any guest which
	 * starts running in hardware virtualization between the set
	 * and the acquisition of the spinlock, we must also ping the
	 * CPU after setting the request bit.
	 */
	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != cpu)
				continue;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (vcpu->cpu != raw_smp_processor_id())
				send_ipi = 1;
		}
	}
	mutex_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  Must make sure the guest
		 * doesn't see old kvmclock values while running with
		 * the new frequency, otherwise we risk the guest seeing
		 * time go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
		smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
	}
}

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu;

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;

	for_each_cpu(cpu, freq->policy->cpus)
		__kvmclock_cpufreq_notifier(freq, cpu);

	return 0;
}
static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call  = kvmclock_cpufreq_notifier
};

static int kvmclock_cpu_online(unsigned int cpu)
{
	tsc_khz_changed(NULL);
	return 0;
}

static void kvm_timer_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		max_tsc_khz = tsc_khz;

		if (IS_ENABLED(CONFIG_CPU_FREQ)) {
			struct cpufreq_policy *policy;
			int cpu;

			cpu = get_cpu();
			policy = cpufreq_cpu_get(cpu);
			if (policy) {
				if (policy->cpuinfo.max_freq)
					max_tsc_khz = policy->cpuinfo.max_freq;
				cpufreq_cpu_put(policy);
			}
			put_cpu();
		}
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}

	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
	atomic_set(&kvm_guest_has_master_clock, 0);
	mutex_unlock(&kvm_lock);
}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
 * Indirection to move queue_work() out of the tk_core.seq write held
 * region to prevent possible deadlocks against time accessors which
 * are invoked with work related locks held.
 */
static void pvclock_irq_work_fn(struct irq_work *w)
{
	queue_work(system_long_wq, &pvclock_gtod_work);
}

static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);

/*
 * Notification about pvclock gtod data update.
 */
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
			       void *priv)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	struct timekeeper *tk = priv;

	update_pvclock_gtod(tk);

	/*
	 * Disable master clock if host does not trust, or does not use,
	 * TSC based clocksource. Delegate queue_work() to irq_work as
	 * this is invoked with tk_core.seq write held.
	 */
	if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
	    atomic_read(&kvm_guest_has_master_clock) != 0)
		irq_work_queue(&pvclock_irq_work);
	return 0;
}

static struct notifier_block pvclock_gtod_notifier = {
	.notifier_call = pvclock_gtod_notify,
};
#endif
static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{
	memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));

#define __KVM_X86_OP(func) \
	static_call_update(kvm_x86_##func, kvm_x86_ops.func);
#define KVM_X86_OP(func) \
	WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
#define KVM_X86_OP_OPTIONAL __KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0(func) \
	static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
					   (void *)__static_call_return0);
#include <asm/kvm-x86-ops.h>
#undef __KVM_X86_OP

	kvm_pmu_ops_update(ops->pmu_ops);
}
static int kvm_x86_check_processor_compatibility(void)
{
	int cpu = smp_processor_id();
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Compatibility checks are done when loading KVM and when enabling
	 * hardware, e.g. during CPU hotplug, to ensure all online CPUs are
	 * compatible, i.e. KVM should never perform a compatibility check on
	 * an offline CPU.
	 */
	WARN_ON(!cpu_online(cpu));

	if (__cr4_reserved_bits(cpu_has, c) !=
	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
		return -EIO;

	return static_call(kvm_x86_check_processor_compatibility)();
}

static void kvm_x86_check_cpu_compat(void *ret)
{
	*(int *)ret = kvm_x86_check_processor_compatibility();
}
static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{
	u64 host_pat;
	int r, cpu;

	if (kvm_x86_ops.hardware_enable) {
		pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
		return -EEXIST;
	}

	/*
	 * KVM explicitly assumes that the guest has an FPU and
	 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the
	 * vCPU's FPU state as a fxregs_state struct.
	 */
	if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
		pr_err("inadequate fpu\n");
		return -EOPNOTSUPP;
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
		return -EOPNOTSUPP;
	}

	/*
	 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes
	 * the PAT bits in SPTEs.  Bail if PAT[0] is programmed to something
	 * other than WB.  Note, EPT doesn't utilize the PAT, but don't bother
	 * with an exception.  PAT[0] is set to WB on RESET and also by the
	 * kernel, i.e. failure indicates a kernel bug or broken firmware.
	 */
	if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) ||
	    (host_pat & GENMASK(2, 0)) != 6) {
		pr_err("host PAT[0] is not WB\n");
		return -EIO;
	}

	x86_emulator_cache = kvm_alloc_emulator_cache();
	if (!x86_emulator_cache) {
		pr_err("failed to allocate cache for x86 emulator\n");
		return -ENOMEM;
	}

	user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
	if (!user_return_msrs) {
		pr_err("failed to allocate percpu kvm_user_return_msrs\n");
		r = -ENOMEM;
		goto out_free_x86_emulator_cache;
	}
	kvm_nr_uret_msrs = 0;

	r = kvm_mmu_vendor_module_init();
	if (r)
		goto out_free_percpu;

	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
		kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
	}

	rdmsrl_safe(MSR_EFER, &host_efer);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		rdmsrl(MSR_IA32_XSS, host_xss);

	kvm_init_pmu_capability(ops->pmu_ops);

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, host_arch_capabilities);

	r = ops->hardware_setup();
	if (r != 0)
		goto out_mmu_exit;

	kvm_ops_update(ops);

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
		if (r < 0)
			goto out_unwind_ops;
	}

	/*
	 * Point of no return!  DO NOT add error paths below this point unless
	 * absolutely necessary, as most operations from this point forward
	 * require unwinding.
	 */
	kvm_timer_init();

	if (pi_inject_timer == -1)
		pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER);
#ifdef CONFIG_X86_64
	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);

	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
		set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
#endif

	kvm_register_perf_callbacks(ops->handle_intel_pt_intr);

	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
		kvm_caps.supported_xss = 0;

#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
	cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
#undef __kvm_cpu_cap_has

	if (kvm_caps.has_tsc_control) {
		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.
		 * A min value is not calculated because it will always
		 * be 1 on all machines.
		 */
		u64 max = min(0x7fffffffULL,
			      __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz));
		kvm_caps.max_guest_tsc_khz = max;
	}
	kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits;
	kvm_init_msr_lists();
	return 0;

out_unwind_ops:
	kvm_x86_ops.hardware_enable = NULL;
	static_call(kvm_x86_hardware_unsetup)();
out_mmu_exit:
	kvm_mmu_vendor_module_exit();
out_free_percpu:
	free_percpu(user_return_msrs);
out_free_x86_emulator_cache:
	kmem_cache_destroy(x86_emulator_cache);
	return r;
}
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{
	int r;

	mutex_lock(&vendor_module_lock);
	r = __kvm_x86_vendor_init(ops);
	mutex_unlock(&vendor_module_lock);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
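
/*
 * Rough sketch of the expected call sequence from a vendor module's init
 * path (simplified and hypothetical; the real flow lives in vmx/svm, not
 * here): vendor setup runs under vendor_module_lock via
 * kvm_x86_vendor_init(), and only then is common KVM brought up, e.g.
 *
 *	static int __init vendor_init(void)
 *	{
 *		int r = kvm_x86_vendor_init(&vendor_init_ops);
 *		if (r)
 *			return r;
 *		r = kvm_init(sizeof(struct vendor_vcpu), vcpu_align, THIS_MODULE);
 *		if (r)
 *			kvm_x86_vendor_exit();
 *		return r;
 *	}
 *
 * vendor_module_lock is what prevents two vendor modules from racing through
 * this path concurrently.
 */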
void kvm_x86_vendor_exit(void)
{
	kvm_unregister_perf_callbacks();

#ifdef CONFIG_X86_64
	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
		clear_hv_tscchange_cb();
#endif

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
		cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
	}
#ifdef CONFIG_X86_64
	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
	irq_work_sync(&pvclock_irq_work);
	cancel_work_sync(&pvclock_gtod_work);
#endif
	static_call(kvm_x86_hardware_unsetup)();
	kvm_mmu_vendor_module_exit();
	free_percpu(user_return_msrs);
	kmem_cache_destroy(x86_emulator_cache);
#ifdef CONFIG_KVM_XEN
	static_key_deferred_flush(&kvm_xen_enabled);
	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
#endif
	mutex_lock(&vendor_module_lock);
	kvm_x86_ops.hardware_enable = NULL;
	mutex_unlock(&vendor_module_lock);
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{
	/*
	 * The vCPU has halted, e.g. executed HLT.  Update the run state if the
	 * local APIC is in-kernel, the run loop will detect the non-runnable
	 * state and halt the vCPU.  Exit to userspace if the local APIC is
	 * managed by userspace, in which case userspace is responsible for
	 * handling wake events.
	 */
	++vcpu->stat.halt_exits;
	if (lapic_in_kernel(vcpu)) {
		vcpu->arch.mp_state = state;
		return 1;
	} else {
		vcpu->run->exit_reason = reason;
		return 0;
	}
}

int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
{
	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	int ret = kvm_skip_emulated_instruction(vcpu);
	/*
	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
	 * KVM_EXIT_DEBUG here.
	 */
	return kvm_emulate_halt_noskip(vcpu) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{
	int ret = kvm_skip_emulated_instruction(vcpu);

	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
				  KVM_EXIT_AP_RESET_HOLD) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
			        unsigned long clock_type)
{
	struct kvm_clock_pairing clock_pairing;
	struct timespec64 ts;
	u64 cycle;
	int ret;

	if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
		return -KVM_EOPNOTSUPP;

	/*
	 * When the TSC is in permanent catchup mode, guests won't be able to
	 * use the pvclock_read_retry loop to get a consistent view of pvclock.
	 */
	if (vcpu->arch.tsc_always_catchup)
		return -KVM_EOPNOTSUPP;

	if (!kvm_get_walltime_and_clockread(&ts, &cycle))
		return -KVM_EOPNOTSUPP;

	clock_pairing.sec = ts.tv_sec;
	clock_pairing.nsec = ts.tv_nsec;
	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
	clock_pairing.flags = 0;
	memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));

	ret = 0;
	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
			    sizeof(struct kvm_clock_pairing)))
		ret = -KVM_EFAULT;

	return ret;
}
#endif
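
/*
 * Guest-side view, as a hedged sketch for reference (the authoritative ABI
 * description is Documentation/virt/kvm/x86/hypercalls.rst, not this
 * comment): the guest issues KVM_HC_CLOCK_PAIRING with a guest physical
 * address and a clock type, then reads a struct kvm_clock_pairing back from
 * that address on success, roughly:
 *
 *	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, paddr,
 *			     KVM_CLOCK_PAIRING_WALLCLOCK);
 *
 * The -KVM_EOPNOTSUPP / -KVM_EFAULT values above surface directly as the
 * hypercall's return value in the guest.
 */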
/*
 * kvm_pv_kick_cpu_op:  Kick a vcpu.
 *
 * @apicid - apicid of vcpu to be kicked.
 */
static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
{
	/*
	 * All other fields are unused for APIC_DM_REMRD, but may be consumed by
	 * common code, e.g. for tracing. Defer initialization to the compiler.
	 */
	struct kvm_lapic_irq lapic_irq = {
		.delivery_mode = APIC_DM_REMRD,
		.dest_mode = APIC_DEST_PHYSICAL,
		.shorthand = APIC_DEST_NOSHORT,
		.dest_id = apicid,
	};

	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}
bool kvm_apicv_activated(struct kvm *kvm)
{
	return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
}
EXPORT_SYMBOL_GPL(kvm_apicv_activated);

bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
{
	ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
	ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu);

	return (vm_reasons | vcpu_reasons) == 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);

static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
				       enum kvm_apicv_inhibit reason, bool set)
{
	if (set)
		__set_bit(reason, inhibits);
	else
		__clear_bit(reason, inhibits);

	trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
}

static void kvm_apicv_init(struct kvm *kvm)
{
	unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;

	init_rwsem(&kvm->arch.apicv_update_lock);

	set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true);

	if (!enable_apicv)
		set_or_clear_apicv_inhibit(inhibits,
					   APICV_INHIBIT_REASON_DISABLE, true);
}
*vcpu
, unsigned long dest_id
)
9885 struct kvm_vcpu
*target
= NULL
;
9886 struct kvm_apic_map
*map
;
9888 vcpu
->stat
.directed_yield_attempted
++;
9890 if (single_task_running())
9894 map
= rcu_dereference(vcpu
->kvm
->arch
.apic_map
);
9896 if (likely(map
) && dest_id
<= map
->max_apic_id
&& map
->phys_map
[dest_id
])
9897 target
= map
->phys_map
[dest_id
]->vcpu
;
9901 if (!target
|| !READ_ONCE(target
->ready
))
9904 /* Ignore requests to yield to self */
9908 if (kvm_vcpu_yield_to(target
) <= 0)
9911 vcpu
->stat
.directed_yield_successful
++;
static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
{
	u64 ret = vcpu->run->hypercall.ret;

	if (!is_64_bit_mode(vcpu))
		ret = (u32)ret;
	kvm_rax_write(vcpu, ret);
	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int op_64_bit;

	if (kvm_xen_hypercall_enabled(vcpu->kvm))
		return kvm_xen_hypercall(vcpu);

	if (kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_rax_read(vcpu);
	a0 = kvm_rbx_read(vcpu);
	a1 = kvm_rcx_read(vcpu);
	a2 = kvm_rdx_read(vcpu);
	a3 = kvm_rsi_read(vcpu);

	trace_kvm_hypercall(nr, a0, a1, a2, a3);

	op_64_bit = is_64_bit_hypercall(vcpu);
	if (!op_64_bit) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
		ret = -KVM_EPERM;
		goto out;
	}

	ret = -KVM_ENOSYS;

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_KICK_CPU:
		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
			break;

		kvm_pv_kick_cpu_op(vcpu->kvm, a1);
		kvm_sched_yield(vcpu, a1);
		ret = 0;
		break;
#ifdef CONFIG_X86_64
	case KVM_HC_CLOCK_PAIRING:
		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
		break;
#endif
	case KVM_HC_SEND_IPI:
		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
			break;

		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
		break;
	case KVM_HC_SCHED_YIELD:
		if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
			break;

		kvm_sched_yield(vcpu, a0);
		ret = 0;
		break;
	case KVM_HC_MAP_GPA_RANGE: {
		u64 gpa = a0, npages = a1, attrs = a2;

		ret = -KVM_ENOSYS;
		if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
			break;

		if (!PAGE_ALIGNED(gpa) || !npages ||
		    gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
			ret = -KVM_EINVAL;
			break;
		}

		vcpu->run->exit_reason        = KVM_EXIT_HYPERCALL;
		vcpu->run->hypercall.nr       = KVM_HC_MAP_GPA_RANGE;
		vcpu->run->hypercall.args[0]  = gpa;
		vcpu->run->hypercall.args[1]  = npages;
		vcpu->run->hypercall.args[2]  = attrs;
		vcpu->run->hypercall.flags    = 0;
		if (op_64_bit)
			vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;

		WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
		vcpu->arch.complete_userspace_io = complete_hypercall_exit;
		return 0;
	}
	default:
		ret = -KVM_ENOSYS;
		break;
	}
out:
	if (!op_64_bit)
		ret = (u32)ret;
	kvm_rax_write(vcpu, ret);

	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
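
/*
 * Register-convention recap, following the reads above (the guest-side
 * wrappers in asm/kvm_para.h are the canonical reference): the hypercall
 * number is taken from RAX, up to four arguments from RBX, RCX, RDX and RSI,
 * and the return value is written back to RAX.  A 32-bit caller has all of
 * these truncated to 32 bits.  For example, a guest issuing KVM_HC_SEND_IPI
 * does something along the lines of (sketch, not verbatim):
 *
 *	kvm_hypercall4(KVM_HC_SEND_IPI, ipi_bitmap_low, ipi_bitmap_high,
 *		       min_apic_id, icr);
 */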
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * If the quirk is disabled, synthesize a #UD and let the guest pick up
	 * the pieces.
	 */
	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
		ctxt->exception.error_code_valid = false;
		ctxt->exception.vector = UD_VECTOR;
		ctxt->have_exception = true;
		return X86EMUL_PROPAGATE_FAULT;
	}

	static_call(kvm_x86_patch_hypercall)(vcpu, instruction);

	return emulator_write_emulated(ctxt, rip, instruction, 3,
		&ctxt->exception);
}

static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return vcpu->run->request_interrupt_window &&
		likely(!pic_in_kernel(vcpu->kvm));
}
/* Called within kvm->srcu read side.  */
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);

	kvm_run->ready_for_interrupt_injection =
		pic_in_kernel(vcpu->kvm) ||
		kvm_vcpu_ready_for_interrupt_injection(vcpu);

	if (is_smm(vcpu))
		kvm_run->flags |= KVM_RUN_X86_SMM;
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops.update_cr8_intercept)
		return;

	if (!lapic_in_kernel(vcpu))
		return;

	if (vcpu->arch.apic->apicv_active)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr);
}
int kvm_check_nested_events(struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
		kvm_x86_ops.nested_ops->triple_fault(vcpu);
		return 1;
	}

	return kvm_x86_ops.nested_ops->check_events(vcpu);
}
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
	/*
	 * Suppress the error code if the vCPU is in Real Mode, as Real Mode
	 * exceptions don't report error codes.  The presence of an error code
	 * is carried with the exception and only stripped when the exception
	 * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
	 * report an error code despite the CPU being in Real Mode.
	 */
	vcpu->arch.exception.has_error_code &= is_protmode(vcpu);

	trace_kvm_inj_exception(vcpu->arch.exception.vector,
				vcpu->arch.exception.has_error_code,
				vcpu->arch.exception.error_code,
				vcpu->arch.exception.injected);

	static_call(kvm_x86_inject_exception)(vcpu);
}
/*
 * Check for any event (interrupt or exception) that is ready to be injected,
 * and if there is at least one event, inject the event with the highest
 * priority.  This handles both "pending" events, i.e. events that have never
 * been injected into the guest, and "injected" events, i.e. events that were
 * injected as part of a previous VM-Enter, but weren't successfully delivered
 * and need to be re-injected.
 *
 * Note, this is not guaranteed to be invoked on a guest instruction boundary,
 * i.e. doesn't guarantee that there's an event window in the guest.  KVM must
 * be able to inject exceptions in the "middle" of an instruction, and so must
 * also be able to re-inject NMIs and IRQs in the middle of an instruction.
 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
 * boundaries is necessary and correct.
 *
 * For simplicity, KVM uses a single path to inject all events (except events
 * that are injected directly from L1 to L2) and doesn't explicitly track
 * instruction boundaries for asynchronous events.  However, because VM-Exits
 * that can occur during instruction execution typically result in KVM skipping
 * the instruction or injecting an exception, e.g. instruction and exception
 * intercepts, and because pending exceptions have higher priority than pending
 * interrupts, KVM still honors instruction boundaries in most scenarios.
 *
 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
 * the instruction or inject an exception, then KVM can incorrectly inject a new
 * asynchronous event if the event became pending after the CPU fetched the
 * instruction (in the guest).  E.g. if a page fault (#PF, #NPF, EPT violation)
 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
 * injected on the restarted instruction instead of being deferred until the
 * instruction completes.
 *
 * In practice, this virtualization hole is unlikely to be observed by the
 * guest, and even less likely to cause functional problems.  To detect the
 * hole, the guest would have to trigger an event on a side effect of an early
 * phase of instruction execution, e.g. on the instruction fetch from memory.
 * And for it to be a functional problem, the guest would need to depend on the
 * ordering between that side effect, the instruction completing, _and_ the
 * delivery of the asynchronous event.
 */
static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
				       bool *req_immediate_exit)
{
	bool can_inject;
	int r;

	/*
	 * Process nested events first, as nested VM-Exit supersedes event
	 * re-injection.  If there's an event queued for re-injection, it will
	 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
	 */
	if (is_guest_mode(vcpu))
		r = kvm_check_nested_events(vcpu);
	else
		r = 0;

	/*
	 * Re-inject exceptions and events *especially* if immediate entry+exit
	 * to/from L2 is needed, as any event that has already been injected
	 * into L2 needs to complete its lifecycle before injecting a new event.
	 *
	 * Don't re-inject an NMI or interrupt if there is a pending exception.
	 * This collision arises if an exception occurred while vectoring the
	 * injected event, KVM intercepted said exception, and KVM ultimately
	 * determined the fault belongs to the guest and queues the exception
	 * for injection back into the guest.
	 *
	 * "Injected" interrupts can also collide with pending exceptions if
	 * userspace ignores the "ready for injection" flag and blindly queues
	 * an interrupt.  In that case, prioritizing the exception is correct,
	 * as the exception "occurred" before the exit to userspace.  Trap-like
	 * exceptions, e.g. most #DBs, have higher priority than interrupts.
	 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest
	 * priority, they're only generated (pended) during instruction
	 * execution, and interrupts are recognized at instruction boundaries.
	 * Thus a pending fault-like exception means the fault occurred on the
	 * *previous* instruction and must be serviced prior to recognizing any
	 * new events in order to fully complete the previous instruction.
	 */
	if (vcpu->arch.exception.injected)
		kvm_inject_exception(vcpu);
	else if (kvm_is_exception_pending(vcpu))
		; /* see above, a pending exception blocks re-injection */
	else if (vcpu->arch.nmi_injected)
		static_call(kvm_x86_inject_nmi)(vcpu);
	else if (vcpu->arch.interrupt.injected)
		static_call(kvm_x86_inject_irq)(vcpu, true);

	/*
	 * Exceptions that morph to VM-Exits are handled above, and pending
	 * exceptions on top of injected exceptions that do not VM-Exit should
	 * either morph to #DF or, sadly, override the injected exception.
	 */
	WARN_ON_ONCE(vcpu->arch.exception.injected &&
		     vcpu->arch.exception.pending);

	/*
	 * Bail if immediate entry+exit to/from the guest is needed to complete
	 * nested VM-Enter or event re-injection so that a different pending
	 * event can be serviced (or if KVM needs to exit to userspace).
	 *
	 * Otherwise, continue processing events even if VM-Exit occurred.  The
	 * VM-Exit will have cleared exceptions that were meant for L2, but
	 * there may now be events that can be injected into L1.
	 */
	if (r < 0)
		goto out;

	/*
	 * A pending exception VM-Exit should either result in nested VM-Exit
	 * or force an immediate re-entry and exit to/from L2, and exception
	 * VM-Exits cannot be injected (flag should _never_ be set).
	 */
	WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
		     vcpu->arch.exception_vmexit.pending);

	/*
	 * New events, other than exceptions, cannot be injected if KVM needs
	 * to re-inject a previous event.  See above comments on re-injecting
	 * for why pending exceptions get priority.
	 */
	can_inject = !kvm_event_needs_reinjection(vcpu);

	if (vcpu->arch.exception.pending) {
		/*
		 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
		 * value pushed on the stack.  Trap-like exception and all #DBs
		 * leave RF as-is (KVM follows Intel's behavior in this regard;
		 * AMD states that code breakpoint #DBs explicitly clear RF=0).
		 *
		 * Note, most versions of Intel's SDM and AMD's APM incorrectly
		 * describe the behavior of General Detect #DBs, which are
		 * fault-like.  They do _not_ set RF, a la code breakpoints.
		 */
		if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
					     X86_EFLAGS_RF);

		if (vcpu->arch.exception.vector == DB_VECTOR) {
			kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
			if (vcpu->arch.dr7 & DR7_GD) {
				vcpu->arch.dr7 &= ~DR7_GD;
				kvm_update_dr7(vcpu);
			}
		}

		kvm_inject_exception(vcpu);

		vcpu->arch.exception.pending = false;
		vcpu->arch.exception.injected = true;

		can_inject = false;
	}

	/* Don't inject interrupts if the user asked to avoid doing so */
	if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ)
		return 0;

	/*
	 * Finally, inject interrupt events.  If an event cannot be injected
	 * due to architectural conditions (e.g. IF=0) a window-open exit
	 * will re-request KVM_REQ_EVENT.  Sometimes however an event is pending
	 * and can architecturally be injected, but we cannot do it right now:
	 * an interrupt could have arrived just now and we have to inject it
	 * as a vmexit, or there could already be an event in the queue, which is
	 * indicated by can_inject.  In that case we request an immediate exit
	 * in order to make progress and get back here for another iteration.
	 * The kvm_x86_ops hooks communicate this by returning -EBUSY.
	 */
#ifdef CONFIG_KVM_SMM
	if (vcpu->arch.smi_pending) {
		r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
		if (r < 0)
			goto out;
		if (r) {
			vcpu->arch.smi_pending = false;
			++vcpu->arch.smi_count;
			enter_smm(vcpu);
			can_inject = false;
		} else
			static_call(kvm_x86_enable_smi_window)(vcpu);
	}
#endif

	if (vcpu->arch.nmi_pending) {
		r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
		if (r < 0)
			goto out;
		if (r) {
			--vcpu->arch.nmi_pending;
			vcpu->arch.nmi_injected = true;
			static_call(kvm_x86_inject_nmi)(vcpu);
			can_inject = false;
			WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0);
		}
		if (vcpu->arch.nmi_pending)
			static_call(kvm_x86_enable_nmi_window)(vcpu);
	}

	if (kvm_cpu_has_injectable_intr(vcpu)) {
		r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
		if (r < 0)
			goto out;
		if (r) {
			int irq = kvm_cpu_get_interrupt(vcpu);

			if (!WARN_ON_ONCE(irq == -1)) {
				kvm_queue_interrupt(vcpu, irq, false);
				static_call(kvm_x86_inject_irq)(vcpu, false);
				WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0);
			}
		}
		if (kvm_cpu_has_injectable_intr(vcpu))
			static_call(kvm_x86_enable_irq_window)(vcpu);
	}

	if (is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->has_events &&
	    kvm_x86_ops.nested_ops->has_events(vcpu))
		*req_immediate_exit = true;

	/*
	 * KVM must never queue a new exception while injecting an event; KVM
	 * is done emulating and should only propagate the to-be-injected event
	 * to the VMCS/VMCB.  Queueing a new exception can put the vCPU into an
	 * infinite loop as KVM will bail from VM-Enter to inject the pending
	 * exception and start the cycle all over.
	 *
	 * Exempt triple faults as they have special handling and won't put the
	 * vCPU into an infinite loop.  Triple fault can be queued when running
	 * VMX without unrestricted guest, as that requires KVM to emulate Real
	 * Mode events (see kvm_inject_realmode_interrupt()).
	 */
	WARN_ON_ONCE(vcpu->arch.exception.pending ||
		     vcpu->arch.exception_vmexit.pending);
	return 0;

out:
	if (r == -EBUSY) {
		*req_immediate_exit = true;
		r = 0;
	}
	return r;
}
static void process_nmi(struct kvm_vcpu *vcpu)
{
	unsigned int limit;

	/*
	 * x86 is limited to one NMI pending, but because KVM can't react to
	 * incoming NMIs as quickly as bare metal, e.g. if the vCPU is
	 * scheduled out, KVM needs to play nice with two queued NMIs showing
	 * up at the same time.  To handle this scenario, allow two NMIs to be
	 * (temporarily) pending so long as NMIs are not blocked and KVM is not
	 * waiting for a previous NMI injection to complete (which effectively
	 * blocks NMIs).  KVM will immediately inject one of the two NMIs, and
	 * will request an NMI window to handle the second NMI.
	 */
	if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
		limit = 1;
	else
		limit = 2;

	/*
	 * Adjust the limit to account for pending virtual NMIs, which aren't
	 * tracked in vcpu->arch.nmi_pending.
	 */
	if (static_call(kvm_x86_is_vnmi_pending)(vcpu))
		limit--;

	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);

	if (vcpu->arch.nmi_pending &&
	    (static_call(kvm_x86_set_vnmi_pending)(vcpu)))
		vcpu->arch.nmi_pending--;

	if (vcpu->arch.nmi_pending)
		kvm_make_request(KVM_REQ_EVENT, vcpu);
}

/* Return total number of NMIs pending injection to the VM */
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.nmi_pending +
	       static_call(kvm_x86_is_vnmi_pending)(vcpu);
}
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap)
{
	kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool activate;

	if (!lapic_in_kernel(vcpu))
		return;

	down_read(&vcpu->kvm->arch.apicv_update_lock);
	preempt_disable();

	/* Do not activate APICV when APIC is disabled */
	activate = kvm_vcpu_apicv_activated(vcpu) &&
		   (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED);

	if (apic->apicv_active == activate)
		goto out;

	apic->apicv_active = activate;
	kvm_apic_update_apicv(vcpu);
	static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);

	/*
	 * When APICv gets disabled, we may still have injected interrupts
	 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
	 * still active when the interrupt got accepted. Make sure
	 * kvm_check_and_inject_events() is called to check for that.
	 */
	if (!apic->apicv_active)
		kvm_make_request(KVM_REQ_EVENT, vcpu);

out:
	preempt_enable();
	up_read(&vcpu->kvm->arch.apicv_update_lock);
}
EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv);

static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return;

	/*
	 * Due to sharing page tables across vCPUs, the xAPIC memslot must be
	 * deleted if any vCPU has xAPIC virtualization and x2APIC enabled, but
	 * hardware doesn't support x2APIC virtualization.  E.g. some AMD CPUs
	 * support AVIC but not x2APIC.  KVM still allows enabling AVIC in this
	 * case so that KVM can use the AVIC doorbell to inject interrupts to
	 * running vCPUs, but KVM must not create SPTEs for the APIC base as
	 * the vCPU would incorrectly be able to access the vAPIC page via MMIO
	 * despite being in x2APIC mode.  For simplicity, inhibiting the APIC
	 * access page is sticky.
	 */
	if (apic_x2apic_mode(vcpu->arch.apic) &&
	    kvm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization)
		kvm_inhibit_apic_access_page(vcpu);

	__kvm_vcpu_update_apicv(vcpu);
}
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set)
{
	unsigned long old, new;

	lockdep_assert_held_write(&kvm->arch.apicv_update_lock);

	if (!(kvm_x86_ops.required_apicv_inhibits & BIT(reason)))
		return;

	old = new = kvm->arch.apicv_inhibit_reasons;

	set_or_clear_apicv_inhibit(&new, reason, set);

	if (!!old != !!new) {
		/*
		 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
		 * false positives in the sanity check WARN in svm_vcpu_run().
		 * This task will wait for all vCPUs to ack the kick IRQ before
		 * updating apicv_inhibit_reasons, and all other vCPUs will
		 * block on acquiring apicv_update_lock so that vCPUs can't
		 * redo svm_vcpu_run() without seeing the new inhibit state.
		 *
		 * Note, holding apicv_update_lock and taking it in the read
		 * side (handling the request) also prevents other vCPUs from
		 * servicing the request with a stale apicv_inhibit_reasons.
		 */
		kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
		kvm->arch.apicv_inhibit_reasons = new;
		if (new) {
			unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
			int idx = srcu_read_lock(&kvm->srcu);

			kvm_zap_gfn_range(kvm, gfn, gfn+1);
			srcu_read_unlock(&kvm->srcu, idx);
		}
	} else {
		kvm->arch.apicv_inhibit_reasons = new;
	}
}

void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set)
{
	if (!enable_apicv)
		return;

	down_write(&kvm->arch.apicv_update_lock);
	__kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
	up_write(&kvm->arch.apicv_update_lock);
}
EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
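
/*
 * Typical caller pattern (illustrative sketch; the real call sites live
 * elsewhere in KVM): a feature that is incompatible with APIC virtualization
 * raises its inhibit bit while active and clears it when done, e.g.
 *
 *	kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ,
 *				       block_irqs_requested);
 *
 * The VM-wide reasons are combined with per-vCPU reasons in
 * kvm_vcpu_apicv_activated() above, so APICv is considered active only when
 * both sets are empty.
 */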
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
	if (!kvm_apic_present(vcpu))
		return;

	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);

	if (irqchip_split(vcpu->kvm))
		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
	else {
		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
		if (ioapic_in_kernel(vcpu->kvm))
			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
	}

	if (is_guest_mode(vcpu))
		vcpu->arch.load_eoi_exitmap_pending = true;
	else
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}

static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
	u64 eoi_exit_bitmap[4];

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

	if (to_hv_vcpu(vcpu)) {
		bitmap_or((ulong *)eoi_exit_bitmap,
			  vcpu->arch.ioapic_handled_vectors,
			  to_hv_synic(vcpu)->vec_bitmap, 256);
		static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
		return;
	}

	static_call_cond(kvm_x86_load_eoi_exitmap)(
		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
}
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
	static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
}

static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return;

	static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
}

void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
{
	smp_send_reschedule(vcpu->cpu);
}
EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
/*
 * Called within kvm->srcu read side.
 * Returns 1 to let vcpu_run() continue the guest execution loop without
 * exiting to the userspace.  Otherwise, the value will be returned to the
 * userspace.
 */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
	bool req_int_win =
		dm_request_for_irq_injection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);
	fastpath_t exit_fastpath;

	bool req_immediate_exit = false;

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
			r = -EIO;
			goto out;
		}

		if (kvm_dirty_ring_check_request(vcpu)) {
			r = 0;
			goto out;
		}

		if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
			if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
			kvm_mmu_free_obsolete_roots(vcpu);
		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
			__kvm_migrate_timers(vcpu);
		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
			kvm_update_masterclock(vcpu->kvm);
		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
			kvm_gen_kvmclock_update(vcpu);
		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
			r = kvm_guest_time_update(vcpu);
			if (unlikely(r))
				goto out;
		}
		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
			kvm_mmu_sync_roots(vcpu);
		if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
			kvm_mmu_load_pgd(vcpu);

		/*
		 * Note, the order matters here, as flushing "all" TLB entries
		 * also flushes the "current" TLB entries, i.e. servicing the
		 * flush "all" will clear any request to flush "current".
		 */
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_vcpu_flush_tlb_all(vcpu);

		kvm_service_local_tlb_flush_requests(vcpu);

		/*
		 * Fall back to a "full" guest flush if Hyper-V's precise
		 * flushing fails.  Note, Hyper-V's flushing is per-vCPU, but
		 * the flushes are considered "remote" and not "local" because
		 * the requests can be initiated from other vCPUs.
		 */
		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
		    kvm_hv_vcpu_flush_tlb(vcpu))
			kvm_vcpu_flush_tlb_guest(vcpu);

		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
			if (is_guest_mode(vcpu))
				kvm_x86_ops.nested_ops->triple_fault(vcpu);

			if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
				vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
				vcpu->mmio_needed = 0;
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out. Do synthetic halt */
			vcpu->arch.apf.halted = true;
			r = 1;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			record_steal_time(vcpu);
		if (kvm_check_request(KVM_REQ_PMU, vcpu))
			kvm_pmu_handle_event(vcpu);
		if (kvm_check_request(KVM_REQ_PMI, vcpu))
			kvm_pmu_deliver_pmi(vcpu);
#ifdef CONFIG_KVM_SMM
		if (kvm_check_request(KVM_REQ_SMI, vcpu))
			process_smi(vcpu);
#endif
		if (kvm_check_request(KVM_REQ_NMI, vcpu))
			process_nmi(vcpu);
		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
			if (test_bit(vcpu->arch.pending_ioapic_eoi,
				     vcpu->arch.ioapic_handled_vectors)) {
				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
				vcpu->run->eoi.vector =
						vcpu->arch.pending_ioapic_eoi;
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
			vcpu_scan_ioapic(vcpu);
		if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
			vcpu_load_eoi_exitmap(vcpu);
		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
			kvm_vcpu_reload_apic_access_page(vcpu);
		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
			vcpu->run->system_event.ndata = 0;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
			vcpu->run->system_event.ndata = 0;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
			struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
			vcpu->run->hyperv = hv_vcpu->exit;
			r = 0;
			goto out;
		}

		/*
		 * KVM_REQ_HV_STIMER has to be processed after
		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
		 * depend on the guest clock being up-to-date
		 */
		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
			kvm_hv_process_stimers(vcpu);
		if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
			kvm_vcpu_update_apicv(vcpu);
		if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
			kvm_check_async_pf_completion(vcpu);
		if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
			static_call(kvm_x86_msr_filter_changed)(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
			static_call(kvm_x86_update_cpu_dirty_logging)(vcpu);
	}

	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
	    kvm_xen_has_interrupt(vcpu)) {
		++vcpu->stat.req_event;
		r = kvm_apic_accept_events(vcpu);
		if (r < 0) {
			r = 0;
			goto out;
		}
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			r = 1;
			goto out;
		}

		r = kvm_check_and_inject_events(vcpu, &req_immediate_exit);
		if (r < 0) {
			r = 0;
			goto out;
		}
		if (req_int_win)
			static_call(kvm_x86_enable_irq_window)(vcpu);

		if (kvm_lapic_enabled(vcpu)) {
			update_cr8_intercept(vcpu);
			kvm_lapic_sync_to_vapic(vcpu);
		}
	}

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r)) {
		goto cancel_injection;
	}

	preempt_disable();

	static_call(kvm_x86_prepare_switch_to_guest)(vcpu);

	/*
	 * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
	 * IPI are then delayed after guest entry, which ensures that they
	 * result in virtual interrupt delivery.
	 */
	local_irq_disable();

	/* Store vcpu->apicv_active before vcpu->mode.  */
	smp_store_release(&vcpu->mode, IN_GUEST_MODE);

	kvm_vcpu_srcu_read_unlock(vcpu);

	/*
	 * 1) We should set ->mode before checking ->requests.  Please see
	 * the comment in kvm_vcpu_exiting_guest_mode().
	 *
	 * 2) For APICv, we should set ->mode before checking PID.ON. This
	 * pairs with the memory barrier implicit in pi_test_and_set_on
	 * (see vmx_deliver_posted_interrupt).
	 *
	 * 3) This also orders the write to mode from any reads to the page
	 * tables done while the VCPU is running.  Please see the comment
	 * in kvm_flush_remote_tlbs.
	 */
	smp_mb__after_srcu_read_unlock();

	/*
	 * Process pending posted interrupts to handle the case where the
	 * notification IRQ arrived in the host, or was never sent (because the
	 * target vCPU wasn't running).  Do this regardless of the vCPU's APICv
	 * status, KVM doesn't update assigned devices when APICv is inhibited,
	 * i.e. they can post interrupts even if APICv is temporarily disabled.
	 */
	if (kvm_lapic_enabled(vcpu))
		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);

	if (kvm_vcpu_exit_request(vcpu)) {
		vcpu->mode = OUTSIDE_GUEST_MODE;
		smp_wmb();
		local_irq_enable();
		preempt_enable();
		kvm_vcpu_srcu_read_lock(vcpu);
		r = 1;
		goto cancel_injection;
	}

	if (req_immediate_exit) {
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		static_call(kvm_x86_request_immediate_exit)(vcpu);
	}

	fpregs_assert_state_consistent();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_return();

	if (vcpu->arch.guest_fpu.xfd_err)
		wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	} else if (unlikely(hw_breakpoint_active())) {
		set_debugreg(0, 7);
	}

	guest_timing_enter_irqoff();

	for (;;) {
		/*
		 * Assert that vCPU vs. VM APICv state is consistent.  An APICv
		 * update must kick and wait for all vCPUs before toggling the
		 * per-VM state, and responding vCPUs must wait for the update
		 * to complete before servicing KVM_REQ_APICV_UPDATE.
		 */
		WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
			     (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));

		exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
			break;

		if (kvm_lapic_enabled(vcpu))
			static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);

		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
			break;
		}

		/* Note, VM-Exits that go down the "slow" path are accounted below. */
		++vcpu->stat.exits;
	}

	/*
	 * Do this here before restoring debug registers on the host.  And
	 * since we do this before handling the vmexit, a DR access vmexit
	 * can (a) read the correct value of the debug registers, (b) set
	 * KVM_DEBUGREG_WONT_EXIT again.
10907 if (unlikely(vcpu
->arch
.switch_db_regs
& KVM_DEBUGREG_WONT_EXIT
)) {
10908 WARN_ON(vcpu
->guest_debug
& KVM_GUESTDBG_USE_HW_BP
);
10909 static_call(kvm_x86_sync_dirty_debug_regs
)(vcpu
);
10910 kvm_update_dr0123(vcpu
);
10911 kvm_update_dr7(vcpu
);
10915 * If the guest has used debug registers, at least dr7
10916 * will be disabled while returning to the host.
10917 * If we don't have active breakpoints in the host, we don't
10918 * care about the messed up debug address registers. But if
10919 * we have some of them active, restore the old state.
10921 if (hw_breakpoint_active())
10922 hw_breakpoint_restore();
10924 vcpu
->arch
.last_vmentry_cpu
= vcpu
->cpu
;
10925 vcpu
->arch
.last_guest_tsc
= kvm_read_l1_tsc(vcpu
, rdtsc());
10927 vcpu
->mode
= OUTSIDE_GUEST_MODE
;
10931 * Sync xfd before calling handle_exit_irqoff() which may
10932 * rely on the fact that guest_fpu::xfd is up-to-date (e.g.
10933 * in #NM irqoff handler).
10935 if (vcpu
->arch
.xfd_no_write_intercept
)
10936 fpu_sync_guest_vmexit_xfd_state();
10938 static_call(kvm_x86_handle_exit_irqoff
)(vcpu
);
10940 if (vcpu
->arch
.guest_fpu
.xfd_err
)
10941 wrmsrl(MSR_IA32_XFD_ERR
, 0);
10944 * Consume any pending interrupts, including the possible source of
10945 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
10946 * An instruction is required after local_irq_enable() to fully unblock
10947 * interrupts on processors that implement an interrupt shadow, the
10948 * stat.exits increment will do nicely.
10950 kvm_before_interrupt(vcpu
, KVM_HANDLING_IRQ
);
10951 local_irq_enable();
10952 ++vcpu
->stat
.exits
;
10953 local_irq_disable();
10954 kvm_after_interrupt(vcpu
);
10957 * Wait until after servicing IRQs to account guest time so that any
10958 * ticks that occurred while running the guest are properly accounted
10959 * to the guest. Waiting until IRQs are enabled degrades the accuracy
10960 * of accounting via context tracking, but the loss of accuracy is
10961 * acceptable for all known use cases.
10963 guest_timing_exit_irqoff();
10965 local_irq_enable();
10968 kvm_vcpu_srcu_read_lock(vcpu
);
10971 * Profile KVM exit RIPs:
10973 if (unlikely(prof_on
== KVM_PROFILING
)) {
10974 unsigned long rip
= kvm_rip_read(vcpu
);
10975 profile_hit(KVM_PROFILING
, (void *)rip
);
10978 if (unlikely(vcpu
->arch
.tsc_always_catchup
))
10979 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, vcpu
);
10981 if (vcpu
->arch
.apic_attention
)
10982 kvm_lapic_sync_from_vapic(vcpu
);
10984 r
= static_call(kvm_x86_handle_exit
)(vcpu
, exit_fastpath
);
10988 if (req_immediate_exit
)
10989 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
10990 static_call(kvm_x86_cancel_injection
)(vcpu
);
10991 if (unlikely(vcpu
->arch
.apic_attention
))
10992 kvm_lapic_sync_from_vapic(vcpu
);
/* Called within kvm->srcu read side.  */
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{
	bool hv_timer;

	if (!kvm_arch_vcpu_runnable(vcpu)) {
		/*
		 * Switch to the software timer before halt-polling/blocking as
		 * the guest's timer may be a break event for the vCPU, and the
		 * hypervisor timer runs only when the CPU is in guest mode.
		 * Switch before halt-polling so that KVM recognizes an expired
		 * timer before blocking.
		 */
		hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
		if (hv_timer)
			kvm_lapic_switch_to_sw_timer(vcpu);

		kvm_vcpu_srcu_read_unlock(vcpu);
		if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
			kvm_vcpu_halt(vcpu);
		else
			kvm_vcpu_block(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);

		if (hv_timer)
			kvm_lapic_switch_to_hv_timer(vcpu);

		/*
		 * If the vCPU is not runnable, a signal or another host event
		 * of some kind is pending; service it without changing the
		 * vCPU's activity state.
		 */
		if (!kvm_arch_vcpu_runnable(vcpu))
			return 1;
	}

	/*
	 * Evaluate nested events before exiting the halted state.  This allows
	 * the halt state to be recorded properly in the VMCS12's activity
	 * state field (AMD does not have a similar field and a VM-Exit always
	 * causes a spurious wakeup from HLT).
	 */
	if (is_guest_mode(vcpu)) {
		if (kvm_check_nested_events(vcpu) < 0)
			return 0;
	}

	if (kvm_apic_accept_events(vcpu) < 0)
		return 0;
	switch(vcpu->arch.mp_state) {
	case KVM_MP_STATE_HALTED:
	case KVM_MP_STATE_AP_RESET_HOLD:
		vcpu->arch.pv.pv_unhalted = false;
		vcpu->arch.mp_state =
			KVM_MP_STATE_RUNNABLE;
		fallthrough;
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.apf.halted = false;
		break;
	case KVM_MP_STATE_INIT_RECEIVED:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	return 1;
}

static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted);
}
/* Called within kvm->srcu read side.  */
static int vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->arch.l1tf_flush_l1d = true;

	for (;;) {
		/*
		 * If another guest vCPU requests a PV TLB flush in the middle
		 * of instruction emulation, the rest of the emulation could
		 * use a stale page translation. Assume that any code after
		 * this point can start executing an instruction.
		 */
		vcpu->arch.at_instruction_boundary = false;
		if (kvm_vcpu_running(vcpu)) {
			r = vcpu_enter_guest(vcpu);
		} else {
			r = vcpu_block(vcpu);
		}

		if (r <= 0)
			break;

		kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
		if (kvm_xen_has_pending_events(vcpu))
			kvm_xen_inject_pending_events(vcpu);

		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu) &&
			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
			r = 0;
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
			++vcpu->stat.request_irq_exits;
			break;
		}

		if (__xfer_to_guest_mode_work_pending()) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			r = xfer_to_guest_mode_handle_work(vcpu);
			kvm_vcpu_srcu_read_lock(vcpu);
			if (r)
				return r;
		}
	}

	return r;
}

static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
}

static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
	BUG_ON(!vcpu->arch.pio.count);

	return complete_emulated_io(vcpu);
}
/*
 * Implements the following, as a state machine:
 *
 * read:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       exit
 *       copy data
 *   execute insn
 *
 * write:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       copy data
 *       exit
 */
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		/* FIXME: return into emulator if single-stepping.  */
		if (vcpu->mmio_is_write)
			return 1;
		vcpu->mmio_read_completed = 1;
		return complete_emulated_io(vcpu);
	}

	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = frag->gpa;
	if (vcpu->mmio_is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	return 0;
}
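
/*
 * Illustrative note (not upstream code): a userspace VMM typically drives the
 * state machine above by looping on KVM_RUN.  On a KVM_EXIT_MMIO exit it
 * services run->mmio (filling run->mmio.data for reads) and calls KVM_RUN
 * again, which re-enters here via vcpu->arch.complete_userspace_io until all
 * fragments are consumed.  Sketch of the userspace side, with handle_mmio()
 * standing in for a hypothetical device-emulation helper:
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_MMIO)
 *		handle_mmio(run->mmio.phys_addr, run->mmio.data,
 *			    run->mmio.len, run->mmio.is_write);
 */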
/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	/* Exclude PKRU, it's restored separately immediately after VM-Exit. */
	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct kvm_run *kvm_run = vcpu->run;
	int r;

	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	kvm_run->flags = 0;
	kvm_load_guest_fpu(vcpu);

	kvm_vcpu_srcu_read_lock(vcpu);
	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		if (kvm_run->immediate_exit) {
			r = -EINTR;
			goto out;
		}

		/*
		 * Don't bother switching APIC timer emulation from the
		 * hypervisor timer to the software timer, the only way for the
		 * APIC timer to be active is if userspace stuffed vCPU state,
		 * i.e. put the vCPU into a nonsensical state.  Only an INIT
		 * will transition the vCPU out of UNINITIALIZED (without more
		 * state stuffing from userspace), which will reset the local
		 * APIC and thus cancel the timer or drop the IRQ (if the timer
		 * already expired).
		 */
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_block(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);

		if (kvm_apic_accept_events(vcpu) < 0) {
			r = 0;
			goto out;
		}
		r = -EAGAIN;
		if (signal_pending(current)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		goto out;
	}

	if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
	    (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
		r = -EINVAL;
		goto out;
	}

	if (kvm_run->kvm_dirty_regs) {
		r = sync_regs(vcpu);
		if (r != 0)
			goto out;
	}

	/* re-sync apic's tpr */
	if (!lapic_in_kernel(vcpu)) {
		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
			r = -EINVAL;
			goto out;
		}
	}

	/*
	 * If userspace set a pending exception and L2 is active, convert it to
	 * a pending VM-Exit if L1 wants to intercept the exception.
	 */
	if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
							ex->error_code)) {
		kvm_queue_exception_vmexit(vcpu, ex->vector,
					   ex->has_error_code, ex->error_code,
					   ex->has_payload, ex->payload);
		ex->injected = false;
		ex->pending = false;
	}
	vcpu->arch.exception_from_userspace = false;

	if (unlikely(vcpu->arch.complete_userspace_io)) {
		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
		vcpu->arch.complete_userspace_io = NULL;
		r = cui(vcpu);
		if (r <= 0)
			goto out;
	} else {
		WARN_ON_ONCE(vcpu->arch.pio.count);
		WARN_ON_ONCE(vcpu->mmio_needed);
	}

	if (kvm_run->immediate_exit) {
		r = -EINTR;
		goto out;
	}

	r = static_call(kvm_x86_vcpu_pre_run)(vcpu);
	if (r <= 0)
		goto out;

	r = vcpu_run(vcpu);

out:
	kvm_put_guest_fpu(vcpu);
	if (kvm_run->kvm_valid_regs)
		store_regs(vcpu);
	post_kvm_run_save(vcpu);
	kvm_vcpu_srcu_read_unlock(vcpu);

	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);
	return r;
}
static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
		/*
		 * We are here if userspace calls get_regs() in the middle of
		 * instruction emulation. Registers state needs to be copied
		 * back from emulation context to vcpu. Userspace shouldn't do
		 * that usually, but some bad designed PV devices (vmware
		 * backdoor interface) need this to work
		 */
		emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	}
	regs->rax = kvm_rax_read(vcpu);
	regs->rbx = kvm_rbx_read(vcpu);
	regs->rcx = kvm_rcx_read(vcpu);
	regs->rdx = kvm_rdx_read(vcpu);
	regs->rsi = kvm_rsi_read(vcpu);
	regs->rdi = kvm_rdi_read(vcpu);
	regs->rsp = kvm_rsp_read(vcpu);
	regs->rbp = kvm_rbp_read(vcpu);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_r8_read(vcpu);
	regs->r9 = kvm_r9_read(vcpu);
	regs->r10 = kvm_r10_read(vcpu);
	regs->r11 = kvm_r11_read(vcpu);
	regs->r12 = kvm_r12_read(vcpu);
	regs->r13 = kvm_r13_read(vcpu);
	regs->r14 = kvm_r14_read(vcpu);
	regs->r15 = kvm_r15_read(vcpu);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__get_regs(vcpu, regs);
	vcpu_put(vcpu);
	return 0;
}
static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

	kvm_rax_write(vcpu, regs->rax);
	kvm_rbx_write(vcpu, regs->rbx);
	kvm_rcx_write(vcpu, regs->rcx);
	kvm_rdx_write(vcpu, regs->rdx);
	kvm_rsi_write(vcpu, regs->rsi);
	kvm_rdi_write(vcpu, regs->rdi);
	kvm_rsp_write(vcpu, regs->rsp);
	kvm_rbp_write(vcpu, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_r8_write(vcpu, regs->r8);
	kvm_r9_write(vcpu, regs->r9);
	kvm_r10_write(vcpu, regs->r10);
	kvm_r11_write(vcpu, regs->r11);
	kvm_r12_write(vcpu, regs->r12);
	kvm_r13_write(vcpu, regs->r13);
	kvm_r14_write(vcpu, regs->r14);
	kvm_r15_write(vcpu, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);

	vcpu->arch.exception.pending = false;
	vcpu->arch.exception_vmexit.pending = false;

	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__set_regs(vcpu, regs);
	vcpu_put(vcpu);
	return 0;
}
static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	struct desc_ptr dt;

	if (vcpu->arch.guest_state_protected)
		goto skip_protected_regs;

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	static_call(kvm_x86_get_idt)(vcpu, &dt);
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;

	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = kvm_read_cr3(vcpu);

skip_protected_regs:
	sregs->cr0 = kvm_read_cr0(vcpu);
	sregs->cr4 = kvm_read_cr4(vcpu);
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);
}

static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	__get_sregs_common(vcpu, sregs);

	if (vcpu->arch.guest_state_protected)
		return;

	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);
}

static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
{
	int i;

	__get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);

	if (vcpu->arch.guest_state_protected)
		return;

	if (is_pae_paging(vcpu)) {
		for (i = 0 ; i < 4 ; i++)
			sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
		sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
	}
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	__get_sregs(vcpu, sregs);
	vcpu_put(vcpu);
	return 0;
}
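
/*
 * Illustrative note (not upstream code): __get_sregs2() only reports PDPTRs
 * when the vCPU is using PAE paging, and signals their validity to userspace
 * via KVM_SREGS2_FLAGS_PDPTRS_VALID in sregs2->flags; when that flag is clear,
 * the contents of the pdptrs[] array are not meaningful.
 */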
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int r;

	vcpu_load(vcpu);
	if (kvm_mpx_supported())
		kvm_load_guest_fpu(vcpu);

	r = kvm_apic_accept_events(vcpu);
	if (r < 0)
		goto out;
	r = 0;

	if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
	     vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
	    vcpu->arch.pv.pv_unhalted)
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
	else
		mp_state->mp_state = vcpu->arch.mp_state;

out:
	if (kvm_mpx_supported())
		kvm_put_guest_fpu(vcpu);
	vcpu_put(vcpu);
	return r;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = -EINVAL;

	vcpu_load(vcpu);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_UNINITIALIZED:
	case KVM_MP_STATE_HALTED:
	case KVM_MP_STATE_AP_RESET_HOLD:
	case KVM_MP_STATE_INIT_RECEIVED:
	case KVM_MP_STATE_SIPI_RECEIVED:
		if (!lapic_in_kernel(vcpu))
			goto out;
		break;

	case KVM_MP_STATE_RUNNABLE:
		break;

	default:
		goto out;
	}

	/*
	 * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow
	 * forcing the guest into INIT/SIPI if those events are supposed to be
	 * blocked.  KVM prioritizes SMI over INIT, so reject INIT/SIPI state
	 * if an SMI is pending as well.
	 */
	if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) &&
	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
		goto out;

	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
	} else
		vcpu->arch.mp_state = mp_state->mp_state;
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
}
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
				   has_error_code, error_code);
	if (ret) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return 0;
	}

	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);
	return 1;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
		/*
		 * When EFER.LME and CR0.PG are set, the processor is in
		 * 64-bit mode (though maybe in a 32-bit code segment).
		 * CR4.PAE and EFER.LMA must be set.
		 */
		if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
			return false;
		if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3))
			return false;
	} else {
		/*
		 * Not in 64-bit mode: EFER.LMA is clear and the code
		 * segment cannot be 64-bit.
		 */
		if (sregs->efer & EFER_LMA || sregs->cs.l)
			return false;
	}

	return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
	       kvm_is_valid_cr0(vcpu, sregs->cr0);
}
static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
			      int *mmu_reset_needed, bool update_pdptrs)
{
	struct msr_data apic_base_msr;
	int idx;
	struct desc_ptr dt;

	if (!kvm_is_valid_sregs(vcpu, sregs))
		return -EINVAL;

	apic_base_msr.data = sregs->apic_base;
	apic_base_msr.host_initiated = true;
	if (kvm_set_apic_base(vcpu, &apic_base_msr))
		return -EINVAL;

	if (vcpu->arch.guest_state_protected)
		return 0;

	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
	static_call(kvm_x86_set_idt)(vcpu, &dt);
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
	static_call(kvm_x86_set_gdt)(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
	static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);

	kvm_set_cr8(vcpu, sregs->cr8);

	*mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
	static_call(kvm_x86_set_efer)(vcpu, sregs->efer);

	*mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);

	*mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);

	if (update_pdptrs) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		if (is_pae_paging(vcpu)) {
			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
			*mmu_reset_needed = 1;
		}
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	int pending_vec, max_bits;
	int mmu_reset_needed = 0;
	int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);

	if (ret)
		return ret;

	if (mmu_reset_needed) {
		kvm_mmu_reset_context(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}

	max_bits = KVM_NR_INTERRUPTS;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);

	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
	return 0;
}
static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
{
	int mmu_reset_needed = 0;
	bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
	bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
		!(sregs2->efer & EFER_LMA);
	int i, ret;

	if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
		return -EINVAL;

	if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
		return -EINVAL;

	ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
				 &mmu_reset_needed, !valid_pdptrs);
	if (ret)
		return ret;

	if (valid_pdptrs) {
		for (i = 0; i < 4 ; i++)
			kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);

		kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
		mmu_reset_needed = 1;
		vcpu->arch.pdptrs_from_userspace = true;
	}
	if (mmu_reset_needed) {
		kvm_mmu_reset_context(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = __set_sregs(vcpu, sregs);
	vcpu_put(vcpu);
	return ret;
}
static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
{
	bool set = false;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (!enable_apicv)
		return;

	down_write(&kvm->arch.apicv_update_lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
			set = true;
			break;
		}
	}
	__kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
	up_write(&kvm->arch.apicv_update_lock);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	if (vcpu->arch.guest_state_protected)
		return -EINVAL;

	vcpu_load(vcpu);

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (kvm_is_exception_pending(vcpu))
			goto out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
	kvm_update_dr7(vcpu);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	static_call(kvm_x86_update_exception_bitmap)(vcpu);

	kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);

	r = 0;

out:
	vcpu_put(vcpu);
	return r;
}
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	vcpu_load(vcpu);

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != INVALID_GPA;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxregs_state *fxsave;

	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
		return 0;

	vcpu_load(vcpu);

	fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxregs_state *fxsave;

	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
		return 0;

	vcpu_load(vcpu);

	fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));

	vcpu_put(vcpu);
	return 0;
}

static void store_regs(struct kvm_vcpu *vcpu)
{
	BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
		__get_regs(vcpu, &vcpu->run->s.regs.regs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
		__get_sregs(vcpu, &vcpu->run->s.regs.sregs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
		kvm_vcpu_ioctl_x86_get_vcpu_events(
				vcpu, &vcpu->run->s.regs.events);
}

static int sync_regs(struct kvm_vcpu *vcpu)
{
	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
		__set_regs(vcpu, &vcpu->run->s.regs.regs);
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
	}

	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
		struct kvm_sregs sregs = vcpu->run->s.regs.sregs;

		if (__set_sregs(vcpu, &sregs))
			return -EINVAL;

		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
	}

	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
		struct kvm_vcpu_events events = vcpu->run->s.regs.events;

		if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
			return -EINVAL;

		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
	}

	return 0;
}
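
/*
 * Illustrative note (not upstream code): store_regs() and sync_regs() are the
 * two halves of the KVM_CAP_SYNC_REGS protocol.  Before KVM_RUN returns,
 * store_regs() copies the state selected by run->kvm_valid_regs into the
 * shared run->s.regs area; on the next KVM_RUN, sync_regs() pushes back any
 * fields userspace marked in run->kvm_dirty_regs and clears those bits.
 */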
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (kvm_check_tsc_unstable() && kvm->created_vcpus)
		pr_warn_once("SMP vm created on host with unstable TSC; "
			     "guest TSC will not be reliable\n");

	if (!kvm->arch.max_vcpu_ids)
		kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;

	if (id >= kvm->arch.max_vcpu_ids)
		return -EINVAL;

	return static_call(kvm_x86_vcpu_precreate)(kvm);
}
11962 int kvm_arch_vcpu_create(struct kvm_vcpu
*vcpu
)
11967 vcpu
->arch
.last_vmentry_cpu
= -1;
11968 vcpu
->arch
.regs_avail
= ~0;
11969 vcpu
->arch
.regs_dirty
= ~0;
11971 kvm_gpc_init(&vcpu
->arch
.pv_time
, vcpu
->kvm
, vcpu
, KVM_HOST_USES_PFN
);
11973 if (!irqchip_in_kernel(vcpu
->kvm
) || kvm_vcpu_is_reset_bsp(vcpu
))
11974 vcpu
->arch
.mp_state
= KVM_MP_STATE_RUNNABLE
;
11976 vcpu
->arch
.mp_state
= KVM_MP_STATE_UNINITIALIZED
;
11978 r
= kvm_mmu_create(vcpu
);
11982 if (irqchip_in_kernel(vcpu
->kvm
)) {
11983 r
= kvm_create_lapic(vcpu
, lapic_timer_advance_ns
);
11985 goto fail_mmu_destroy
;
11988 * Defer evaluating inhibits until the vCPU is first run, as
11989 * this vCPU will not get notified of any changes until this
11990 * vCPU is visible to other vCPUs (marked online and added to
11991 * the set of vCPUs). Opportunistically mark APICv active as
11992 * VMX in particularly is highly unlikely to have inhibits.
11993 * Ignore the current per-VM APICv state so that vCPU creation
11994 * is guaranteed to run with a deterministic value, the request
11995 * will ensure the vCPU gets the correct state before VM-Entry.
11997 if (enable_apicv
) {
11998 vcpu
->arch
.apic
->apicv_active
= true;
11999 kvm_make_request(KVM_REQ_APICV_UPDATE
, vcpu
);
12002 static_branch_inc(&kvm_has_noapic_vcpu
);
12006 page
= alloc_page(GFP_KERNEL_ACCOUNT
| __GFP_ZERO
);
12008 goto fail_free_lapic
;
12009 vcpu
->arch
.pio_data
= page_address(page
);
12011 vcpu
->arch
.mce_banks
= kcalloc(KVM_MAX_MCE_BANKS
* 4, sizeof(u64
),
12012 GFP_KERNEL_ACCOUNT
);
12013 vcpu
->arch
.mci_ctl2_banks
= kcalloc(KVM_MAX_MCE_BANKS
, sizeof(u64
),
12014 GFP_KERNEL_ACCOUNT
);
12015 if (!vcpu
->arch
.mce_banks
|| !vcpu
->arch
.mci_ctl2_banks
)
12016 goto fail_free_mce_banks
;
12017 vcpu
->arch
.mcg_cap
= KVM_MAX_MCE_BANKS
;
12019 if (!zalloc_cpumask_var(&vcpu
->arch
.wbinvd_dirty_mask
,
12020 GFP_KERNEL_ACCOUNT
))
12021 goto fail_free_mce_banks
;
12023 if (!alloc_emulate_ctxt(vcpu
))
12024 goto free_wbinvd_dirty_mask
;
12026 if (!fpu_alloc_guest_fpstate(&vcpu
->arch
.guest_fpu
)) {
12027 pr_err("failed to allocate vcpu's fpu\n");
12028 goto free_emulate_ctxt
;
12031 vcpu
->arch
.maxphyaddr
= cpuid_query_maxphyaddr(vcpu
);
12032 vcpu
->arch
.reserved_gpa_bits
= kvm_vcpu_reserved_gpa_bits_raw(vcpu
);
12034 vcpu
->arch
.pat
= MSR_IA32_CR_PAT_DEFAULT
;
12036 kvm_async_pf_hash_reset(vcpu
);
12038 vcpu
->arch
.perf_capabilities
= kvm_caps
.supported_perf_cap
;
12039 kvm_pmu_init(vcpu
);
12041 vcpu
->arch
.pending_external_vector
= -1;
12042 vcpu
->arch
.preempted_in_kernel
= false;
12044 #if IS_ENABLED(CONFIG_HYPERV)
12045 vcpu
->arch
.hv_root_tdp
= INVALID_PAGE
;
12048 r
= static_call(kvm_x86_vcpu_create
)(vcpu
);
12050 goto free_guest_fpu
;
12052 vcpu
->arch
.arch_capabilities
= kvm_get_arch_capabilities();
12053 vcpu
->arch
.msr_platform_info
= MSR_PLATFORM_INFO_CPUID_FAULT
;
12054 kvm_xen_init_vcpu(vcpu
);
12055 kvm_vcpu_mtrr_init(vcpu
);
12057 kvm_set_tsc_khz(vcpu
, vcpu
->kvm
->arch
.default_tsc_khz
);
12058 kvm_vcpu_reset(vcpu
, false);
12059 kvm_init_mmu(vcpu
);
12064 fpu_free_guest_fpstate(&vcpu
->arch
.guest_fpu
);
12066 kmem_cache_free(x86_emulator_cache
, vcpu
->arch
.emulate_ctxt
);
12067 free_wbinvd_dirty_mask
:
12068 free_cpumask_var(vcpu
->arch
.wbinvd_dirty_mask
);
12069 fail_free_mce_banks
:
12070 kfree(vcpu
->arch
.mce_banks
);
12071 kfree(vcpu
->arch
.mci_ctl2_banks
);
12072 free_page((unsigned long)vcpu
->arch
.pio_data
);
12074 kvm_free_lapic(vcpu
);
12076 kvm_mmu_destroy(vcpu
);
12080 void kvm_arch_vcpu_postcreate(struct kvm_vcpu
*vcpu
)
12082 struct kvm
*kvm
= vcpu
->kvm
;
12084 if (mutex_lock_killable(&vcpu
->mutex
))
12087 kvm_synchronize_tsc(vcpu
, NULL
);
12090 /* poll control enabled by default */
12091 vcpu
->arch
.msr_kvm_poll_control
= 1;
12093 mutex_unlock(&vcpu
->mutex
);
12095 if (kvmclock_periodic_sync
&& vcpu
->vcpu_idx
== 0)
12096 schedule_delayed_work(&kvm
->arch
.kvmclock_sync_work
,
12097 KVMCLOCK_SYNC_PERIOD
);
12100 void kvm_arch_vcpu_destroy(struct kvm_vcpu
*vcpu
)
12104 kvmclock_reset(vcpu
);
12106 static_call(kvm_x86_vcpu_free
)(vcpu
);
12108 kmem_cache_free(x86_emulator_cache
, vcpu
->arch
.emulate_ctxt
);
12109 free_cpumask_var(vcpu
->arch
.wbinvd_dirty_mask
);
12110 fpu_free_guest_fpstate(&vcpu
->arch
.guest_fpu
);
12112 kvm_xen_destroy_vcpu(vcpu
);
12113 kvm_hv_vcpu_uninit(vcpu
);
12114 kvm_pmu_destroy(vcpu
);
12115 kfree(vcpu
->arch
.mce_banks
);
12116 kfree(vcpu
->arch
.mci_ctl2_banks
);
12117 kvm_free_lapic(vcpu
);
12118 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
12119 kvm_mmu_destroy(vcpu
);
12120 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
12121 free_page((unsigned long)vcpu
->arch
.pio_data
);
12122 kvfree(vcpu
->arch
.cpuid_entries
);
12123 if (!lapic_in_kernel(vcpu
))
12124 static_branch_dec(&kvm_has_noapic_vcpu
);
12127 void kvm_vcpu_reset(struct kvm_vcpu
*vcpu
, bool init_event
)
12129 struct kvm_cpuid_entry2
*cpuid_0x1
;
12130 unsigned long old_cr0
= kvm_read_cr0(vcpu
);
12131 unsigned long new_cr0
;
12134 * Several of the "set" flows, e.g. ->set_cr0(), read other registers
12135 * to handle side effects. RESET emulation hits those flows and relies
12136 * on emulated/virtualized registers, including those that are loaded
12137 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel
12138 * to detect improper or missing initialization.
12140 WARN_ON_ONCE(!init_event
&&
12141 (old_cr0
|| kvm_read_cr3(vcpu
) || kvm_read_cr4(vcpu
)));
12144 * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
12145 * possible to INIT the vCPU while L2 is active. Force the vCPU back
12146 * into L1 as EFER.SVME is cleared on INIT (along with all other EFER
12147 * bits), i.e. virtualization is disabled.
12149 if (is_guest_mode(vcpu
))
12150 kvm_leave_nested(vcpu
);
12152 kvm_lapic_reset(vcpu
, init_event
);
12154 WARN_ON_ONCE(is_guest_mode(vcpu
) || is_smm(vcpu
));
12155 vcpu
->arch
.hflags
= 0;
12157 vcpu
->arch
.smi_pending
= 0;
12158 vcpu
->arch
.smi_count
= 0;
12159 atomic_set(&vcpu
->arch
.nmi_queued
, 0);
12160 vcpu
->arch
.nmi_pending
= 0;
12161 vcpu
->arch
.nmi_injected
= false;
12162 kvm_clear_interrupt_queue(vcpu
);
12163 kvm_clear_exception_queue(vcpu
);
12165 memset(vcpu
->arch
.db
, 0, sizeof(vcpu
->arch
.db
));
12166 kvm_update_dr0123(vcpu
);
12167 vcpu
->arch
.dr6
= DR6_ACTIVE_LOW
;
12168 vcpu
->arch
.dr7
= DR7_FIXED_1
;
12169 kvm_update_dr7(vcpu
);
12171 vcpu
->arch
.cr2
= 0;
12173 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
12174 vcpu
->arch
.apf
.msr_en_val
= 0;
12175 vcpu
->arch
.apf
.msr_int_val
= 0;
12176 vcpu
->arch
.st
.msr_val
= 0;
12178 kvmclock_reset(vcpu
);
12180 kvm_clear_async_pf_completion_queue(vcpu
);
12181 kvm_async_pf_hash_reset(vcpu
);
12182 vcpu
->arch
.apf
.halted
= false;
12184 if (vcpu
->arch
.guest_fpu
.fpstate
&& kvm_mpx_supported()) {
12185 struct fpstate
*fpstate
= vcpu
->arch
.guest_fpu
.fpstate
;
12188 * All paths that lead to INIT are required to load the guest's
12189 * FPU state (because most paths are buried in KVM_RUN).
12192 kvm_put_guest_fpu(vcpu
);
12194 fpstate_clear_xstate_component(fpstate
, XFEATURE_BNDREGS
);
12195 fpstate_clear_xstate_component(fpstate
, XFEATURE_BNDCSR
);
12198 kvm_load_guest_fpu(vcpu
);
12202 kvm_pmu_reset(vcpu
);
12203 vcpu
->arch
.smbase
= 0x30000;
12205 vcpu
->arch
.msr_misc_features_enables
= 0;
12206 vcpu
->arch
.ia32_misc_enable_msr
= MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
|
12207 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
;
12209 __kvm_set_xcr(vcpu
, 0, XFEATURE_MASK_FP
);
12210 __kvm_set_msr(vcpu
, MSR_IA32_XSS
, 0, true);
12213 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
12214 memset(vcpu
->arch
.regs
, 0, sizeof(vcpu
->arch
.regs
));
12215 kvm_register_mark_dirty(vcpu
, VCPU_REGS_RSP
);
12218 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon)
12219 * if no CPUID match is found. Note, it's impossible to get a match at
12220 * RESET since KVM emulates RESET before exposing the vCPU to userspace,
12221 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
12222 * on RESET. But, go through the motions in case that's ever remedied.
12224 cpuid_0x1
= kvm_find_cpuid_entry(vcpu
, 1);
12225 kvm_rdx_write(vcpu
, cpuid_0x1
? cpuid_0x1
->eax
: 0x600);
12227 static_call(kvm_x86_vcpu_reset
)(vcpu
, init_event
);
12229 kvm_set_rflags(vcpu
, X86_EFLAGS_FIXED
);
12230 kvm_rip_write(vcpu
, 0xfff0);
12232 vcpu
->arch
.cr3
= 0;
12233 kvm_register_mark_dirty(vcpu
, VCPU_EXREG_CR3
);
12236 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
12237 * of Intel's SDM list CD/NW as being set on INIT, but they contradict
12238 * (or qualify) that with a footnote stating that CD/NW are preserved.
12240 new_cr0
= X86_CR0_ET
;
12242 new_cr0
|= (old_cr0
& (X86_CR0_NW
| X86_CR0_CD
));
12244 new_cr0
|= X86_CR0_NW
| X86_CR0_CD
;
12246 static_call(kvm_x86_set_cr0
)(vcpu
, new_cr0
);
12247 static_call(kvm_x86_set_cr4
)(vcpu
, 0);
12248 static_call(kvm_x86_set_efer
)(vcpu
, 0);
12249 static_call(kvm_x86_update_exception_bitmap
)(vcpu
);
12252 * On the standard CR0/CR4/EFER modification paths, there are several
12253 * complex conditions determining whether the MMU has to be reset and/or
12254 * which PCIDs have to be flushed. However, CR0.WP and the paging-related
12255 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush
12256 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as
12257 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here.
12259 if (old_cr0
& X86_CR0_PG
) {
12260 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST
, vcpu
);
12261 kvm_mmu_reset_context(vcpu
);
12265 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's
12266 * APM states the TLBs are untouched by INIT, but it also states that
12267 * the TLBs are flushed on "External initialization of the processor."
12268 * Flush the guest TLB regardless of vendor, there is no meaningful
12269 * benefit in relying on the guest to flush the TLB immediately after
12270 * INIT. A spurious TLB flush is benign and likely negligible from a
12271 * performance perspective.
12274 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST
, vcpu
);
12276 EXPORT_SYMBOL_GPL(kvm_vcpu_reset
);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs.selector = vector << 8;
	cs.base = vector << 12;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
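
/*
 * Illustrative note (not upstream code): the SIPI vector selects the 4KiB
 * page the AP starts executing at.  For example, for vector 0x9A the code
 * above yields CS.selector = 0x9A00, CS.base = 0x9A000 and RIP = 0, i.e.
 * real-mode execution begins at physical address 0x9A000.
 */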
12290 int kvm_arch_hardware_enable(void)
12293 struct kvm_vcpu
*vcpu
;
12298 bool stable
, backwards_tsc
= false;
12300 kvm_user_return_msr_cpu_online();
12302 ret
= kvm_x86_check_processor_compatibility();
12306 ret
= static_call(kvm_x86_hardware_enable
)();
12310 local_tsc
= rdtsc();
12311 stable
= !kvm_check_tsc_unstable();
12312 list_for_each_entry(kvm
, &vm_list
, vm_list
) {
12313 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
12314 if (!stable
&& vcpu
->cpu
== smp_processor_id())
12315 kvm_make_request(KVM_REQ_CLOCK_UPDATE
, vcpu
);
12316 if (stable
&& vcpu
->arch
.last_host_tsc
> local_tsc
) {
12317 backwards_tsc
= true;
12318 if (vcpu
->arch
.last_host_tsc
> max_tsc
)
12319 max_tsc
= vcpu
->arch
.last_host_tsc
;
12325 * Sometimes, even reliable TSCs go backwards. This happens on
12326 * platforms that reset TSC during suspend or hibernate actions, but
12327 * maintain synchronization. We must compensate. Fortunately, we can
12328 * detect that condition here, which happens early in CPU bringup,
12329 * before any KVM threads can be running. Unfortunately, we can't
12330 * bring the TSCs fully up to date with real time, as we aren't yet far
12331 * enough into CPU bringup that we know how much real time has actually
12332 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot
12333 * variables that haven't been updated yet.
12335 * So we simply find the maximum observed TSC above, then record the
12336 * adjustment to TSC in each VCPU. When the VCPU later gets loaded,
12337 * the adjustment will be applied. Note that we accumulate
12338 * adjustments, in case multiple suspend cycles happen before some VCPU
12339 * gets a chance to run again. In the event that no KVM threads get a
12340 * chance to run, we will miss the entire elapsed period, as we'll have
12341 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
12342 * loose cycle time. This isn't too big a deal, since the loss will be
12343 * uniform across all VCPUs (not to mention the scenario is extremely
12344 * unlikely). It is possible that a second hibernate recovery happens
12345 * much faster than a first, causing the observed TSC here to be
12346 * smaller; this would require additional padding adjustment, which is
12347 * why we set last_host_tsc to the local tsc observed here.
12349 * N.B. - this code below runs only on platforms with reliable TSC,
12350 * as that is the only way backwards_tsc is set above. Also note
12351 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
12352 * have the same delta_cyc adjustment applied if backwards_tsc
12353 * is detected. Note further, this adjustment is only done once,
12354 * as we reset last_host_tsc on all VCPUs to stop this from being
12355 * called multiple times (one for each physical CPU bringup).
12357 * Platforms with unreliable TSCs don't have to deal with this, they
12358 * will be compensated by the logic in vcpu_load, which sets the TSC to
12359 * catchup mode. This will catchup all VCPUs to real time, but cannot
12360 * guarantee that they stay in perfect synchronization.
12362 if (backwards_tsc
) {
12363 u64 delta_cyc
= max_tsc
- local_tsc
;
12364 list_for_each_entry(kvm
, &vm_list
, vm_list
) {
12365 kvm
->arch
.backwards_tsc_observed
= true;
12366 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
12367 vcpu
->arch
.tsc_offset_adjustment
+= delta_cyc
;
12368 vcpu
->arch
.last_host_tsc
= local_tsc
;
12369 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE
, vcpu
);
12373 * We have to disable TSC offset matching.. if you were
12374 * booting a VM while issuing an S4 host suspend....
12375 * you may have some problem. Solving this issue is
12376 * left as an exercise to the reader.
12378 kvm
->arch
.last_tsc_nsec
= 0;
12379 kvm
->arch
.last_tsc_write
= 0;
void kvm_arch_hardware_disable(void)
{
	static_call(kvm_x86_hardware_disable)();
	drop_user_return_notifiers();
}

bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	vcpu->arch.l1tf_flush_l1d = true;
	if (pmu->version && unlikely(pmu->event_count)) {
		pmu->need_cleanup = true;
		kvm_make_request(KVM_REQ_PMU, vcpu);
	}
	static_call(kvm_x86_sched_in)(vcpu, cpu);
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(to_kvm_hv(kvm)->hv_pa_pg);
	__kvm_arch_free_vm(kvm);
}
12424 int kvm_arch_init_vm(struct kvm
*kvm
, unsigned long type
)
12427 unsigned long flags
;
12432 ret
= kvm_page_track_init(kvm
);
12436 kvm_mmu_init_vm(kvm
);
12438 ret
= static_call(kvm_x86_vm_init
)(kvm
);
12440 goto out_uninit_mmu
;
12442 INIT_HLIST_HEAD(&kvm
->arch
.mask_notifier_list
);
12443 atomic_set(&kvm
->arch
.noncoherent_dma_count
, 0);
12445 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
12446 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID
, &kvm
->arch
.irq_sources_bitmap
);
12447 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
12448 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID
,
12449 &kvm
->arch
.irq_sources_bitmap
);
12451 raw_spin_lock_init(&kvm
->arch
.tsc_write_lock
);
12452 mutex_init(&kvm
->arch
.apic_map_lock
);
12453 seqcount_raw_spinlock_init(&kvm
->arch
.pvclock_sc
, &kvm
->arch
.tsc_write_lock
);
12454 kvm
->arch
.kvmclock_offset
= -get_kvmclock_base_ns();
12456 raw_spin_lock_irqsave(&kvm
->arch
.tsc_write_lock
, flags
);
12457 pvclock_update_vm_gtod_copy(kvm
);
12458 raw_spin_unlock_irqrestore(&kvm
->arch
.tsc_write_lock
, flags
);
12460 kvm
->arch
.default_tsc_khz
= max_tsc_khz
? : tsc_khz
;
12461 kvm
->arch
.guest_can_read_msr_platform_info
= true;
12462 kvm
->arch
.enable_pmu
= enable_pmu
;
12464 #if IS_ENABLED(CONFIG_HYPERV)
12465 spin_lock_init(&kvm
->arch
.hv_root_tdp_lock
);
12466 kvm
->arch
.hv_root_tdp
= INVALID_PAGE
;
12469 INIT_DELAYED_WORK(&kvm
->arch
.kvmclock_update_work
, kvmclock_update_fn
);
12470 INIT_DELAYED_WORK(&kvm
->arch
.kvmclock_sync_work
, kvmclock_sync_fn
);
12472 kvm_apicv_init(kvm
);
12473 kvm_hv_init_vm(kvm
);
12474 kvm_xen_init_vm(kvm
);
12479 kvm_mmu_uninit_vm(kvm
);
12480 kvm_page_track_cleanup(kvm
);
12485 int kvm_arch_post_init_vm(struct kvm
*kvm
)
12487 return kvm_mmu_post_init_vm(kvm
);
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_unload_vcpu_mmus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_unload_vcpu_mmu(vcpu);
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
}
12516 * __x86_set_memory_region: Setup KVM internal memory slot
12518 * @kvm: the kvm pointer to the VM.
12519 * @id: the slot ID to setup.
12520 * @gpa: the GPA to install the slot (unused when @size == 0).
12521 * @size: the size of the slot. Set to zero to uninstall a slot.
12523 * This function helps to setup a KVM internal memory slot. Specify
12524 * @size > 0 to install a new slot, while @size == 0 to uninstall a
12525 * slot. The return code can be one of the following:
12527 * HVA: on success (uninstall will return a bogus HVA)
12530 * The caller should always use IS_ERR() to check the return value
12531 * before use. Note, the KVM internal memory slots are guaranteed to
12532 * remain valid and unchanged until the VM is destroyed, i.e., the
12533 * GPA->HVA translation will not change. However, the HVA is a user
12534 * address, i.e. its accessibility is not guaranteed, and must be
12535 * accessed via __copy_{to,from}_user().
12537 void __user
* __x86_set_memory_region(struct kvm
*kvm
, int id
, gpa_t gpa
,
12541 unsigned long hva
, old_npages
;
12542 struct kvm_memslots
*slots
= kvm_memslots(kvm
);
12543 struct kvm_memory_slot
*slot
;
12545 /* Called with kvm->slots_lock held. */
12546 if (WARN_ON(id
>= KVM_MEM_SLOTS_NUM
))
12547 return ERR_PTR_USR(-EINVAL
);
12549 slot
= id_to_memslot(slots
, id
);
12551 if (slot
&& slot
->npages
)
12552 return ERR_PTR_USR(-EEXIST
);
12555 * MAP_SHARED to prevent internal slot pages from being moved
12558 hva
= vm_mmap(NULL
, 0, size
, PROT_READ
| PROT_WRITE
,
12559 MAP_SHARED
| MAP_ANONYMOUS
, 0);
12560 if (IS_ERR_VALUE(hva
))
12561 return (void __user
*)hva
;
12563 if (!slot
|| !slot
->npages
)
12566 old_npages
= slot
->npages
;
12567 hva
= slot
->userspace_addr
;
12570 for (i
= 0; i
< KVM_ADDRESS_SPACE_NUM
; i
++) {
12571 struct kvm_userspace_memory_region m
;
12573 m
.slot
= id
| (i
<< 16);
12575 m
.guest_phys_addr
= gpa
;
12576 m
.userspace_addr
= hva
;
12577 m
.memory_size
= size
;
12578 r
= __kvm_set_memory_region(kvm
, &m
);
12580 return ERR_PTR_USR(r
);
12584 vm_munmap(hva
, old_npages
* PAGE_SIZE
);
12586 return (void __user
*)hva
;
12588 EXPORT_SYMBOL_GPL(__x86_set_memory_region
);
12590 void kvm_arch_pre_destroy_vm(struct kvm
*kvm
)
12592 kvm_mmu_pre_destroy_vm(kvm
);
12595 void kvm_arch_destroy_vm(struct kvm
*kvm
)
12597 if (current
->mm
== kvm
->mm
) {
12599 * Free memory regions allocated on behalf of userspace,
12600 * unless the memory map has changed due to process exit
12603 mutex_lock(&kvm
->slots_lock
);
12604 __x86_set_memory_region(kvm
, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
,
12606 __x86_set_memory_region(kvm
, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT
,
12608 __x86_set_memory_region(kvm
, TSS_PRIVATE_MEMSLOT
, 0, 0);
12609 mutex_unlock(&kvm
->slots_lock
);
12611 kvm_unload_vcpu_mmus(kvm
);
12612 static_call_cond(kvm_x86_vm_destroy
)(kvm
);
12613 kvm_free_msr_filter(srcu_dereference_check(kvm
->arch
.msr_filter
, &kvm
->srcu
, 1));
12614 kvm_pic_destroy(kvm
);
12615 kvm_ioapic_destroy(kvm
);
12616 kvm_destroy_vcpus(kvm
);
12617 kvfree(rcu_dereference_check(kvm
->arch
.apic_map
, 1));
12618 kfree(srcu_dereference_check(kvm
->arch
.pmu_event_filter
, &kvm
->srcu
, 1));
12619 kvm_mmu_uninit_vm(kvm
);
12620 kvm_page_track_cleanup(kvm
);
12621 kvm_xen_destroy_vm(kvm
);
12622 kvm_hv_destroy_vm(kvm
);
12625 static void memslot_rmap_free(struct kvm_memory_slot
*slot
)
12629 for (i
= 0; i
< KVM_NR_PAGE_SIZES
; ++i
) {
12630 kvfree(slot
->arch
.rmap
[i
]);
12631 slot
->arch
.rmap
[i
] = NULL
;
12635 void kvm_arch_free_memslot(struct kvm
*kvm
, struct kvm_memory_slot
*slot
)
12639 memslot_rmap_free(slot
);
12641 for (i
= 1; i
< KVM_NR_PAGE_SIZES
; ++i
) {
12642 kvfree(slot
->arch
.lpage_info
[i
- 1]);
12643 slot
->arch
.lpage_info
[i
- 1] = NULL
;
12646 kvm_page_track_free_memslot(slot
);
12649 int memslot_rmap_alloc(struct kvm_memory_slot
*slot
, unsigned long npages
)
12651 const int sz
= sizeof(*slot
->arch
.rmap
[0]);
12654 for (i
= 0; i
< KVM_NR_PAGE_SIZES
; ++i
) {
12656 int lpages
= __kvm_mmu_slot_lpages(slot
, npages
, level
);
12658 if (slot
->arch
.rmap
[i
])
12661 slot
->arch
.rmap
[i
] = __vcalloc(lpages
, sz
, GFP_KERNEL_ACCOUNT
);
12662 if (!slot
->arch
.rmap
[i
]) {
12663 memslot_rmap_free(slot
);
12671 static int kvm_alloc_memslot_metadata(struct kvm
*kvm
,
12672 struct kvm_memory_slot
*slot
)
12674 unsigned long npages
= slot
->npages
;
12678 * Clear out the previous array pointers for the KVM_MR_MOVE case. The
12679 * old arrays will be freed by __kvm_set_memory_region() if installing
12680 * the new memslot is successful.
12682 memset(&slot
->arch
, 0, sizeof(slot
->arch
));
12684 if (kvm_memslots_have_rmaps(kvm
)) {
12685 r
= memslot_rmap_alloc(slot
, npages
);
12690 for (i
= 1; i
< KVM_NR_PAGE_SIZES
; ++i
) {
12691 struct kvm_lpage_info
*linfo
;
12692 unsigned long ugfn
;
12696 lpages
= __kvm_mmu_slot_lpages(slot
, npages
, level
);
12698 linfo
= __vcalloc(lpages
, sizeof(*linfo
), GFP_KERNEL_ACCOUNT
);
12702 slot
->arch
.lpage_info
[i
- 1] = linfo
;
12704 if (slot
->base_gfn
& (KVM_PAGES_PER_HPAGE(level
) - 1))
12705 linfo
[0].disallow_lpage
= 1;
12706 if ((slot
->base_gfn
+ npages
) & (KVM_PAGES_PER_HPAGE(level
) - 1))
12707 linfo
[lpages
- 1].disallow_lpage
= 1;
12708 ugfn
= slot
->userspace_addr
>> PAGE_SHIFT
;
12710 * If the gfn and userspace address are not aligned wrt each
12711 * other, disable large page support for this slot.
12713 if ((slot
->base_gfn
^ ugfn
) & (KVM_PAGES_PER_HPAGE(level
) - 1)) {
12716 for (j
= 0; j
< lpages
; ++j
)
12717 linfo
[j
].disallow_lpage
= 1;
12721 if (kvm_page_track_create_memslot(kvm
, slot
, npages
))
12727 memslot_rmap_free(slot
);
12729 for (i
= 1; i
< KVM_NR_PAGE_SIZES
; ++i
) {
12730 kvfree(slot
->arch
.lpage_info
[i
- 1]);
12731 slot
->arch
.lpage_info
[i
- 1] = NULL
;
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	/*
	 * memslots->generation has been incremented.
	 * mmio generation may have reached its maximum value.
	 */
	kvm_mmu_invalidate_mmio_sptes(kvm, gen);

	/* Force re-initialization of steal_time cache */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * KVM doesn't support moving memslots when there are external page
	 * trackers attached to the VM, i.e. if KVMGT is in use.
	 */
	if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm))
		return -EINVAL;

	if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
		if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
			return -EINVAL;

		return kvm_alloc_memslot_metadata(kvm, new);
	}

	if (change == KVM_MR_FLAGS_ONLY)
		memcpy(&new->arch, &old->arch, sizeof(old->arch));
	else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
		return -EIO;

	return 0;
}

static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
{
	int nr_slots;

	if (!kvm_x86_ops.cpu_dirty_log_size)
		return;

	nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
	if ((enable && nr_slots == 1) || !nr_slots)
		kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
}
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	u32 old_flags = old ? old->flags : 0;
	u32 new_flags = new ? new->flags : 0;
	bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;

	/*
	 * Update CPU dirty logging if dirty logging is being toggled.  This
	 * applies to all operations.
	 */
	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
		kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);

	/*
	 * Nothing more to do for RO slots (which can't be dirtied and can't be
	 * made writable) or CREATE/MOVE/DELETE of a slot.
	 *
	 * For a memslot with dirty logging disabled:
	 * CREATE:      No dirty mappings will already exist.
	 * MOVE/DELETE: The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 *
	 * For a memslot with dirty logging enabled:
	 * CREATE:      No shadow pages exist, thus nothing to write-protect
	 *		and no dirty bits to clear.
	 * MOVE/DELETE: The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot().
	 */
	if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
		return;

	/*
	 * READONLY and non-flags changes were filtered out above, and the only
	 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
	 * logging isn't being toggled on or off.
	 */
	if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
		return;

	if (!log_dirty_pages) {
		/*
		 * Dirty logging tracks sptes in 4k granularity, meaning that
		 * large sptes have to be split.  If live migration succeeds,
		 * the guest in the source machine will be destroyed and large
		 * sptes will be created in the destination.  However, if the
		 * guest continues to run in the source machine (for example if
		 * live migration fails), small sptes will remain around and
		 * cause bad performance.
		 *
		 * Scan sptes if dirty logging has been stopped, dropping those
		 * which can be collapsed into a single large-page spte.  Later
		 * page faults will create the large-page sptes.
		 */
		kvm_mmu_zap_collapsible_sptes(kvm, new);
	} else {
		/*
		 * Initially-all-set does not require write protecting any page,
		 * because they're all assumed to be dirty.
		 */
		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			return;

		if (READ_ONCE(eager_page_split))
			kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);

		if (kvm_x86_ops.cpu_dirty_log_size) {
			kvm_mmu_slot_leaf_clear_dirty(kvm, new);
			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
		} else {
			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
		}

		/*
		 * Unconditionally flush the TLBs after enabling dirty logging.
		 * A flush is almost always going to be necessary (see below),
		 * and unconditionally flushing allows the helpers to omit
		 * the subtly complex checks when removing write access.
		 *
		 * Do the flush outside of mmu_lock to reduce the amount of
		 * time mmu_lock is held.  Flushing after dropping mmu_lock is
		 * safe as KVM only needs to guarantee the slot is fully
		 * write-protected before returning to userspace, i.e. before
		 * userspace can consume the dirty status.
		 *
		 * Flushing outside of mmu_lock requires KVM to be careful when
		 * making decisions based on writable status of an SPTE, e.g. a
		 * !writable SPTE doesn't guarantee a CPU can't perform writes.
		 *
		 * Specifically, KVM also write-protects guest page tables to
		 * monitor changes when using shadow paging, and must guarantee
		 * no CPUs can write to those pages before mmu_lock is dropped.
		 * Because CPUs may have stale TLB entries at this point, a
		 * !writable SPTE doesn't guarantee CPUs can't perform writes.
		 *
		 * KVM also allows making SPTES writable outside of mmu_lock,
		 * e.g. to allow dirty logging without taking mmu_lock.
		 *
		 * To handle these scenarios, KVM uses a separate software-only
		 * bit (MMU-writable) to track if a SPTE is !writable due to
		 * a guest page table being write-protected (KVM clears the
		 * MMU-writable flag when write-protecting for shadow paging).
		 *
		 * The use of MMU-writable is also the primary motivation for
		 * the unconditional flush.  Because KVM must guarantee that a
		 * CPU doesn't contain stale, writable TLB entries for a
		 * !MMU-writable SPTE, KVM must flush if it encounters any
		 * MMU-writable SPTE regardless of whether the actual hardware
		 * writable bit was set.  I.e. KVM is almost guaranteed to need
		 * to flush, while unconditionally flushing allows the "remove
		 * write access" helpers to ignore MMU-writable entirely.
		 *
		 * See is_writable_pte() for more details (the case involving
		 * access-tracked SPTEs is particularly relevant).
		 */
		kvm_flush_remote_tlbs_memslot(kvm, new);
	}
}
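/*
 * A short note on the write-protection granularity chosen above: when the CPU
 * provides a hardware dirty log (kvm_x86_ops.cpu_dirty_log_size != 0, e.g.
 * Intel PML), 4KiB dirty information comes from the hardware log, so KVM only
 * clears existing dirty bits and write-protects at 2MiB and above to force
 * huge mappings to be split on the next write.  Without that assist, every
 * mapping is write-protected down to 4KiB and dirty state is harvested from
 * the resulting write faults.
 */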
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	if (change == KVM_MR_DELETE)
		kvm_page_track_delete_slot(kvm, old);

	if (!kvm->arch.n_requested_mmu_pages &&
	    (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
		unsigned long nr_mmu_pages;

		nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
		nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_apply_flags(kvm, old, new, change);

	/* Free the arrays associated with the old memslot. */
	if (change == KVM_MR_MOVE)
		kvm_arch_free_memslot(kvm, old);
}
static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	return (is_guest_mode(vcpu) &&
		static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
	    kvm_apic_init_sipi_allowed(vcpu))
		return true;

	if (vcpu->arch.pv.pv_unhalted)
		return true;

	if (kvm_is_exception_pending(vcpu))
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    (vcpu->arch.nmi_pending &&
	     static_call(kvm_x86_nmi_allowed)(vcpu, false)))
		return true;

#ifdef CONFIG_KVM_SMM
	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending &&
	     static_call(kvm_x86_smi_allowed)(vcpu, false)))
		return true;
#endif

	if (kvm_test_request(KVM_REQ_PMI, vcpu))
		return true;

	if (kvm_arch_interrupt_allowed(vcpu) &&
	    (kvm_cpu_has_interrupt(vcpu) ||
	     kvm_guest_apic_has_interrupt(vcpu)))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	if (is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->has_events &&
	    kvm_x86_ops.nested_ops->has_events(vcpu))
		return true;

	if (kvm_xen_has_pending_events(vcpu))
		return true;

	return false;
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_apicv_active(vcpu) &&
	    static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
		return true;

	return false;
}
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
#ifdef CONFIG_KVM_SMM
	    kvm_test_request(KVM_REQ_SMI, vcpu) ||
#endif
	    kvm_test_request(KVM_REQ_EVENT, vcpu))
		return true;

	return kvm_arch_dy_has_pending_interrupt(vcpu);
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return true;

	return vcpu->arch.preempted_in_kernel;
}
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return kvm_rip_read(vcpu);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return static_call(kvm_x86_interrupt_allowed)(vcpu, false);
}
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
	/* Can't read the RIP when guest state is protected, just return 0 */
	if (vcpu->arch.guest_state_protected)
		return 0;

	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
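/*
 * Worked example of the linear RIP above: outside 64-bit mode the value is
 * the CS segment base plus RIP, truncated to 32 bits, so a real-mode guest
 * at CS.base = 0xf0000, RIP = 0xfff0 yields 0xffff0.  In 64-bit mode the CS
 * base is architecturally treated as zero, so the linear RIP is simply RIP.
 * kvm_is_linear_rip() below compares this value against a previously saved
 * singlestep_rip to decide whether TF still needs to be forced.
 */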
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = static_call(kvm_x86_get_rflags)(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	static_call(kvm_x86_set_rflags)(vcpu, rflags);
}
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));

	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}
static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
}
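/*
 * The helpers above treat vcpu->arch.apf.gfns[] as a small open-addressed
 * hash table: ASYNC_PF_PER_VCPU is a power of two (64 at the time of
 * writing), empty slots hold ~0, hash_32() of the gfn picks the home bucket,
 * and collisions probe linearly with wraparound via the
 * & (ASYNC_PF_PER_VCPU - 1) mask.  E.g. a gfn hashing to the last bucket
 * probes last, 0, 1, ... until a free slot (on insert) or the gfn (on lookup)
 * is found.
 */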
static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}
static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < ASYNC_PF_PER_VCPU &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}
bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);

	if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
		return;

	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;

			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}
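/*
 * Deletion above is the classic "backward shift" for open addressing: simply
 * clearing slot i would break probe chains for entries that hashed before i
 * but were pushed past it by collisions.  So the scan walks forward from the
 * vacated slot; an entry whose home bucket k does *not* lie in the (cyclic)
 * interval ]i, j] can be safely pulled back into i, and the scan repeats from
 * its old position until an empty slot terminates the chain.
 */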
static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
{
	u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
				      sizeof(reason));
}
static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);

	return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					     &token, offset, sizeof(token));
}
static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
	u32 val;

	if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					 &val, offset, sizeof(val)))
		return false;

	return !val;
}
static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
{
	if (!kvm_pv_async_pf_enabled(vcpu))
		return false;

	if (vcpu->arch.apf.send_user_only &&
	    static_call(kvm_x86_get_cpl)(vcpu) == 0)
		return false;

	if (is_guest_mode(vcpu)) {
		/*
		 * L1 needs to opt into the special #PF vmexits that are
		 * used to deliver async page faults.
		 */
		return vcpu->arch.apf.delivery_as_pf_vmexit;
	} else {
		/*
		 * Play it safe in case the guest temporarily disables paging.
		 * The real mode IDT in particular is unlikely to have a #PF
		 * handler installed.
		 */
		return is_paging(vcpu);
	}
}
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
	if (unlikely(!lapic_in_kernel(vcpu) ||
		     kvm_event_needs_reinjection(vcpu) ||
		     kvm_is_exception_pending(vcpu)))
		return false;

	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
		return false;

	/*
	 * If interrupts are off we cannot even use an artificial
	 * halt state.
	 */
	return kvm_arch_interrupt_allowed(vcpu);
}
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (kvm_can_deliver_async_pf(vcpu) &&
	    !apf_put_user_notpresent(vcpu)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		fault.async_page_fault = true;
		kvm_inject_page_fault(vcpu, &fault);
		return true;
	} else {
		/*
		 * It is not possible to deliver a paravirtualized asynchronous
		 * page fault, but putting the guest in an artificial halt state
		 * can be beneficial nevertheless: if an interrupt arrives, we
		 * can deliver it timely and perhaps the guest will schedule
		 * another process.  When the instruction that triggered a page
		 * fault is retried, hopefully the page will be ready in the host.
		 */
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
		return false;
	}
}
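/*
 * Note how the "not present" event above is delivered: it is injected as an
 * ordinary #PF, but fault.address (i.e. the CR2 the guest reads) carries the
 * async-PF token rather than the faulting address.  A guest that enabled the
 * paravirtual async-PF protocol recognizes the token, puts the task to sleep,
 * and later matches it against the token in the "page ready" notification.
 */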
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vcpu->arch.apf.vec
	};

	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);

	if ((work->wakeup_all || work->notpresent_injected) &&
	    kvm_pv_async_pf_enabled(vcpu) &&
	    !apf_put_user_ready(vcpu, work->arch.token)) {
		vcpu->arch.apf.pageready_pending = true;
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}

	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_APF_READY, vcpu);
	if (!vcpu->arch.apf.pageready_pending)
		kvm_vcpu_kick(vcpu);
}
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!kvm_pv_async_pf_enabled(vcpu))
		return true;
	else
		return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
}
void kvm_arch_start_assignment(struct kvm *kvm)
{
	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
		static_call_cond(kvm_x86_pi_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return raw_atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
{
	/*
	 * Non-coherent DMA assignment and de-assignment will affect
	 * whether KVM honors guest MTRRs and cause changes in memtypes
	 * in TDP.
	 * So, pass %true unconditionally to indicate non-coherent DMA was,
	 * or will be involved, and that zapping SPTEs might be necessary.
	 */
	if (__kvm_mmu_honors_guest_mtrrs(true))
		kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
}

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1)
		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
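/*
 * As with assigned devices, the non-coherent DMA bookkeeping above is a
 * simple refcount: the zap in kvm_noncoherent_dma_assignment_start_or_stop()
 * only runs on the 0 <-> 1 transitions, i.e. when the first non-coherent
 * device is attached or the last one is detached, since those are the only
 * points at which the effective memtypes seen through the mappings can
 * change when KVM honors guest MTRRs.
 */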
bool kvm_arch_has_irq_bypass(void)
{
	return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
}
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	int ret;

	irqfd->producer = prod;
	kvm_arch_start_assignment(irqfd->kvm);
	ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm,
						  prod->irq, irqfd->gsi, 1);

	if (ret)
		kvm_arch_end_assignment(irqfd->kvm);

	return ret;
}
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	WARN_ON(irqfd->producer != prod);
	irqfd->producer = NULL;

	/*
	 * When the producer of a consumer is unregistered, we change back to
	 * remapped mode, so we can re-use the current implementation
	 * when the irq is masked/disabled or the consumer side (KVM
	 * in this case) doesn't want to receive the interrupts.
	 */
	ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);

	kvm_arch_end_assignment(irqfd->kvm);
}
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set)
{
	return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set);
}
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
				  struct kvm_kernel_irq_routing_entry *new)
{
	if (new->type != KVM_IRQ_ROUTING_MSI)
		return true;

	return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
}
bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
int kvm_spec_ctrl_test_value(u64 value)
{
	/*
	 * test that setting IA32_SPEC_CTRL to given value
	 * is allowed by the host processor
	 */

	u64 saved_value;
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);

	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
		ret = 1;
	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
		ret = 1;
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);

	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
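/*
 * The probe above works by saving the current IA32_SPEC_CTRL value, trying to
 * write the requested value with wrmsrl_safe(), and restoring the original on
 * success, all with local interrupts disabled so nothing else can touch the
 * MSR in between.  A nonzero return means the write faulted, i.e. the host
 * CPU does not accept that value and the caller should reject it.
 */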
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	struct x86_exception fault;
	u64 access = error_code &
		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);

	if (!(error_code & PFERR_PRESENT_MASK) ||
	    mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
		/*
		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
		 * tables probably do not match the TLB.  Just proceed
		 * with the error code that the processor gave.
		 */
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = error_code;
		fault.nested_page_fault = false;
		fault.address = gva;
		fault.async_page_fault = false;
	}
	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}
EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
/*
 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
 * indicates whether exit to userspace is needed.
 */
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e)
{
	if (r == X86EMUL_PROPAGATE_FAULT) {
		if (KVM_BUG_ON(!e, vcpu->kvm))
			return -EIO;

		kvm_inject_emulated_page_fault(vcpu, e);
		return 1;
	}

	/*
	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
	 * while handling a VMX instruction KVM could've handled the request
	 * correctly by exiting to userspace and performing I/O but there
	 * doesn't seem to be a real use-case behind such requests, just return
	 * KVM_EXIT_INTERNAL_ERROR for now.
	 */
	kvm_prepare_emulation_failure_exit(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
	bool pcid_enabled;
	struct x86_exception e;
	struct {
		u64 pcid;
		u64 gla;
	} operand;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	if (operand.pcid >> 12 != 0) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);

	switch (type) {
	case INVPCID_TYPE_INDIV_ADDR:
		if ((!pcid_enabled && (operand.pcid != 0)) ||
		    is_noncanonical_address(operand.gla, vcpu)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_SINGLE_CTXT:
		if (!pcid_enabled && (operand.pcid != 0)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}

		kvm_invalidate_pcid(vcpu, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_ALL_NON_GLOBAL:
		/*
		 * Currently, KVM doesn't mark global entries in the shadow
		 * page tables, so a non-global flush just degenerates to a
		 * global flush. If needed, we could optimize this later by
		 * keeping track of global entries in shadow page tables.
		 */
		fallthrough;
	case INVPCID_TYPE_ALL_INCL_GLOBAL:
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return kvm_skip_emulated_instruction(vcpu);

	default:
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
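/*
 * For reference, the four architectural INVPCID types handled above are:
 * type 0 (individual address) flushes a single linear address + PCID pair,
 * type 1 (single context) flushes one PCID, and types 2 and 3 (all contexts,
 * with and without globals) both collapse to a full guest TLB flush here
 * because KVM does not track global entries in its shadow page tables.
 */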
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned int len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		// VMG change, at this point, we're always done
		// RIP has already been advanced
		return 1;
	}

	// More MMIO is needed
	run->mmio.phys_addr = frag->gpa;
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	if (run->mmio.is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
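/*
 * The fragment handling above exists because kvm_run.mmio.data is only 8
 * bytes: an SEV-ES MMIO access larger than that is kept as a single fragment
 * that is drained in at-most-8-byte pieces.  E.g. a 12-byte write exits to
 * userspace with the first 8 bytes, then this completion callback advances
 * data/gpa/len by 8 and exits again with the remaining 4 bytes before
 * declaring the access done.
 */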
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			  void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/*TODO: Check if need to increment number of frags */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 1;
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			 void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/*TODO: Check if need to increment number of frags */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 0;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
{
	vcpu->arch.sev_pio_count -= count;
	vcpu->arch.sev_pio_data += count * size;
}
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port);

static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	vcpu->arch.pio.count = 0;
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_outs(vcpu, size, port);
	return 1;
}
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);

		/* memcpy done already by emulator_pio_out. */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!ret)
			break;

		/* Emulation done by the kernel. */
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
	return 0;
}
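/*
 * SEV-ES string PIO is processed in chunks of at most PAGE_SIZE / size
 * elements per iteration; e.g. an OUTS of 2000 dword elements is emulated
 * 1024 at a time.  If emulator_pio_out() has to punt to userspace the loop
 * stops, and complete_sev_es_emulated_outs() resumes with whatever
 * sev_pio_count is still outstanding once userspace completes the I/O.
 */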
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port);

static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
	unsigned count = vcpu->arch.pio.count;
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
	advance_sev_es_emulated_pio(vcpu, count, size);
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_ins(vcpu, size, port);
	return 1;
}
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
			break;

		/* Emulation done by the kernel. */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
	return 0;
}
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in)
{
	vcpu->arch.sev_pio_data = data;
	vcpu->arch.sev_pio_count = count;
	return in ? kvm_sev_es_ins(vcpu, size, port)
		  : kvm_sev_es_outs(vcpu, size, port);
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
static int __init kvm_x86_init(void)
{
	kvm_mmu_x86_module_init();
	mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
	return 0;
}
module_init(kvm_x86_init);

static void __exit kvm_x86_exit(void)
{
	/*
	 * If module_init() is implemented, module_exit() must also be
	 * implemented to allow module unload.
	 */
}
module_exit(kvm_x86_exit);