/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>
#include <linux/workqueue.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>
#include <linux/kfifo.h>

#include <asm/pvclock-abi.h>
#include <asm/msr-index.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
/*
 * CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n, provide a dummy max if
 * KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS).
 */
#ifdef CONFIG_KVM_MAX_NR_VCPUS
#define KVM_MAX_VCPUS CONFIG_KVM_MAX_NR_VCPUS
#else
#define KVM_MAX_VCPUS 1024
#endif
/*
 * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
 * might be larger than the actual number of VCPUs because the
 * APIC ID encodes CPU topology information.
 *
 * In the worst case, we'll need less than one extra bit for the
 * Core ID, and less than one extra bit for the Package (Die) ID,
 * so a ratio of 4 should be enough.
 */
#define KVM_VCPU_ID_RATIO 4
#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
/* memory slots that are not exposed to userspace */
#define KVM_INTERNAL_MEM_SLOTS 3

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
					   KVM_BUS_LOCK_DETECTION_EXIT)

#define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
					  KVM_X86_NOTIFY_VMEXIT_USER)
/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_MMU_PGD		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#ifdef CONFIG_KVM_SMM
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#endif
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_NESTED_STATE_PAGES	KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT	KVM_ARCH_REQ(26)
#define KVM_REQ_TLB_FLUSH_GUEST \
	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY		KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED	KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
	KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
	KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_TLB_FLUSH \
	KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
			  | X86_CR4_LAM_SUP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

/* KVM Hugepage definitions for x86 */
#define KVM_MAX_HUGEPAGE_LEVEL	PG_LEVEL_1G
#define KVM_NR_PAGE_SIZES	(KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
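/*
 * Illustrative arithmetic (not part of the original header): for a 2MiB
 * page, PG_LEVEL_2M == 2, so KVM_HPAGE_GFN_SHIFT(2) == 9,
 * KVM_HPAGE_SHIFT(2) == PAGE_SHIFT + 9 == 21, KVM_HPAGE_SIZE(2) == 2MiB,
 * and KVM_PAGES_PER_HPAGE(2) == 512 base pages per huge page.
 */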
#define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 256
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64
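/*
 * Illustrative arithmetic (not part of the original header): the shadow-MMU
 * page budget scales with memslot size via the ratio above, e.g. a 4GiB
 * memslot (1048576 4KiB pages) divided by
 * KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO (50) allows roughly 20971 MMU pages,
 * clamped from below by KVM_MIN_ALLOC_MMU_PAGES.
 */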
enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8  = __VCPU_REGS_R8,
	VCPU_REGS_R9  = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS,

	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR0,
	VCPU_EXREG_CR3,
	VCPU_EXREG_CR4,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
	VCPU_EXREG_EXIT_INFO_1,
	VCPU_EXREG_EXIT_INFO_2,
};
enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_REENTER_GUEST,
	EXIT_FASTPATH_EXIT_HANDLED,
};
typedef enum exit_fastpath_completion fastpath_t;

struct x86_emulate_ctxt;
struct x86_exception;
enum x86_intercept;
enum x86_intercept_stage;
#define KVM_NR_DB_REGS	4

#define DR6_BUS_LOCK	(1 << 11)
#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
/*
 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
 * they will never be 0 for now, but when they are defined
 * in the future it will require no code change.
 *
 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
 */
#define DR6_ACTIVE_LOW	0xffff0ff0
#define DR6_VOLATILE	0x0001e80f
#define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)
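/*
 * Illustrative sketch (not part of the original header): "active low"
 * means a bit in DR6_ACTIVE_LOW reads as 1 when its condition is *not*
 * met, which is why DR6_ACTIVE_LOW doubles as the DR6 reset value and why
 * converting between an architectural DR6 value and a #DB exception
 * payload is a single XOR:
 *
 *	payload = dr6 ^ DR6_ACTIVE_LOW;
 *	dr6     = payload ^ DR6_ACTIVE_LOW;
 */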
#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define KVM_GUESTDBG_VALID_MASK \
	(KVM_GUESTDBG_ENABLE | \
	KVM_GUESTDBG_SINGLESTEP | \
	KVM_GUESTDBG_USE_HW_BP | \
	KVM_GUESTDBG_USE_SW_BP | \
	KVM_GUESTDBG_INJECT_BP | \
	KVM_GUESTDBG_INJECT_DB | \
	KVM_GUESTDBG_BLOCKIRQ)
/* Page fault error code bits */
#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)
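/*
 * Illustrative sketch (not part of the original header): fault reasons
 * combine by OR-ing masks, e.g. a user-mode write to a present page that
 * failed a protection-key check would carry:
 *
 *	u64 error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
 *			 PFERR_USER_MASK | PFERR_PK_MASK;
 *
 * PFERR_NESTED_GUEST_PAGE above is exactly such a pre-composed combination,
 * used to recognize faults on the guest's own page tables.
 */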
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes by guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;
/*
 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
 * also includes TDP pages) to determine whether or not a page can be used in
 * the given MMU context.  This is a subset of the overall kvm_cpu_role to
 * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows
 * allocating 2 bytes per gfn instead of 4 bytes per gfn.
 *
 * Upper-level shadow pages having gptes are tracked for write-protection via
 * gfn_write_track.  As above, gfn_write_track is a 16 bit counter, so KVM must
 * not create more than 2^16-1 upper-level shadow pages at a single gfn,
 * otherwise gfn_write_track will overflow and explosions will ensue.
 *
 * A unique shadow page (SP) for a gfn is created if and only if an existing SP
 * cannot be reused.  The ability to reuse a SP is tracked by its role, which
 * incorporates various mode bits and properties of the SP.  Roughly speaking,
 * the number of unique SPs that can theoretically be created is 2^n, where n
 * is the number of bits that are used to compute the role.
 *
 * But, even though there are 19 bits in the mask below, not all combinations
 * of modes and flags are possible:
 *
 *   - invalid shadow pages are not accounted, so the bits are effectively 18.
 *
 *   - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
 *     execonly and ad_disabled are only used for nested EPT which has
 *     has_4_byte_gpte=0.  Therefore, 2 bits are always unused.
 *
 *   - the 4 bits of level are effectively limited to the values 2/3/4/5,
 *     as 4k SPs are not tracked (allowed to go unsync).  In addition non-PAE
 *     paging has exactly one upper level, making level completely redundant
 *     when has_4_byte_gpte=1.
 *
 *   - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
 *     cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
 *
 * Therefore, the maximum number of possible upper-level shadow pages for a
 * single gfn is a bit less than 2^13.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned has_4_byte_gpte:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned efer_nx:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned passthrough:1;
		unsigned :5;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};
/*
 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
 * relevant to the current MMU configuration.  When loading CR0, CR4, or EFER,
 * including on nested transitions, if nothing in the full role changes then
 * MMU re-configuration can be skipped. @valid bit is set on first usage so we
 * don't treat an all-zero structure as valid data.
 *
 * The properties that are tracked in the extended role but not the page role
 * are for things that either (a) do not affect the validity of the shadow page
 * or (b) are indirectly reflected in the shadow page's role.  For example,
 * CR4.PKE only affects permission checks for software walks of the guest page
 * tables (because KVM doesn't support Protection Keys with shadow paging), and
 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
 *
 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
 * SMAP, but the MMU's permission checks for software walks need to be SMEP and
 * SMAP aware regardless of CR0.WP.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int efer_lma:1;
	};
};

union kvm_cpu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};
struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t pgd;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1 + i)
#define KVM_MMU_ROOTS_ALL		(BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
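/*
 * Illustrative sketch (not part of the original header): with
 * KVM_MMU_NUM_PREV_ROOTS == 3, KVM_MMU_ROOTS_ALL evaluates to 0b1111,
 * covering the current root and all three cached previous roots. KVM's MMU
 * code takes such a mask to pick which roots to drop, e.g. (assumed helper)
 *
 *	kvm_mmu_free_roots(kvm, mmu,
 *			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));
 */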
#define KVM_HAVE_MMU_RWLOCK

struct kvm_mmu_page;
struct kvm_page_fault;
/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gpa_t gva_or_gpa, u64 access,
			    struct x86_exception *exception);
	int (*sync_spte)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp, int i);
	struct kvm_mmu_root_info root;
	union kvm_cpu_role cpu_role;
	union kvm_mmu_page_role root_role;

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];

	u64 *pae_root;
	u64 *pml4_root;
	u64 *pml5_root;

	/*
	 * check zero bits on shadow page table entries, these
	 * bits include not only hardware reserved bits but also
	 * the bits spte never used.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	u64 pdptrs[4]; /* pae */
};
struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	bool is_paused;
	bool intr;
	/*
	 * Base value of the PMC counter, relative to the *consumed* count in
	 * the associated perf_event.  This value includes counter updates from
	 * the perf_event and emulated_count since the last time the counter
	 * was reprogrammed, but it is *not* the current value as seen by the
	 * guest or userspace.
	 *
	 * The count is relative to the associated perf_event so that KVM
	 * doesn't need to reprogram the perf_event every time the guest writes
	 * to the counter.
	 */
	u64 counter;
	/*
	 * PMC events triggered by KVM emulation that haven't been fully
	 * processed, i.e. haven't undergone overflow detection.
	 */
	u64 emulated_counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
	/*
	 * only for creating or reusing perf_event,
	 * eventsel value for general purpose counters,
	 * ctrl value for fixed counters.
	 */
	u64 current_config;
};
/* More counters may conflict with other existing Architectural MSRs */
#define KVM_INTEL_PMC_MAX_GENERIC	8
#define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
#define MSR_ARCH_PERFMON_EVENTSEL_MAX	(MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
#define KVM_PMC_MAX_FIXED	3
#define MSR_ARCH_PERFMON_FIXED_CTR_MAX	(MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
#define KVM_AMD_PMC_MAX_GENERIC	6
struct kvm_pmu {
	u8 version;
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 fixed_ctr_ctrl_mask;
	u64 global_ctrl;
	u64 global_status;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_status_mask;
	u64 reserved_bits;
	u64 raw_event_mask;
	struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];

	/*
	 * Overlay the bitmap with a 64-bit atomic so that all bits can be
	 * set in a single access, e.g. to reprogram all counters when the PMU
	 * filter changes.
	 */
	union {
		DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
		atomic64_t __reprogram_pmi;
	};
	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

	u64 ds_area;
	u64 pebs_enable;
	u64 pebs_enable_mask;
	u64 pebs_data_cfg;
	u64 pebs_data_cfg_mask;

	/*
	 * If a guest counter is cross-mapped to a host counter with a
	 * different index, its PEBS capability will be temporarily disabled.
	 *
	 * The user should make sure that this mask is updated
	 * after disabling interrupts and before perf_guest_get_msrs();
	 */
	u64 host_cross_mapped_mask;

	/*
	 * The gate to release perf_events not marked in
	 * pmc_in_use only once in a vcpu time slice.
	 */
	bool need_cleanup;

	/*
	 * The total number of programmed perf_events; it helps to avoid a
	 * redundant check before cleanup if the guest doesn't use the vPMU
	 * at all.
	 */
	u8 event_count;
};
enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
};
struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};
/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};
/* Hyper-V synthetic interrupt controller (SynIC)*/
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};
/* The maximum number of entries on the TLB flush fifo. */
#define KVM_HV_TLB_FLUSH_FIFO_SIZE (16)
/*
 * Note: the following 'magic' entry is made up by KVM to avoid putting
 * anything besides GVA on the TLB flush fifo. It is theoretically possible
 * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000
 * which will look identical. KVM's action to 'flush everything' instead of
 * flushing these particular addresses is, however, fully legitimate as
 * flushing more than requested is always OK.
 */
#define KVM_HV_TLB_FLUSHALL_ENTRY  ((u64)-1)

enum hv_tlb_flush_fifos {
	HV_L1_TLB_FLUSH_FIFO,
	HV_L2_TLB_FLUSH_FIFO,
	HV_NR_TLB_FLUSH_FIFOS,
};
struct kvm_vcpu_hv_tlb_flush_fifo {
	spinlock_t write_lock;
	DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
};
/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	struct kvm_vcpu *vcpu;
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	bool enforce_cpuid;
	struct {
		u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
		u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
		u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
		u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
		u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
		u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
		u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
		u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
	} cpuid_cache;

	struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];

	/* Preallocated buffer for handling hypercalls passing sparse vCPU set */
	u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];

	struct hv_vp_assist_page vp_assist_page;

	struct {
		u64 pa_page_gpa;
		u64 vm_id;
		u32 vp_id;
	} nested;
};
struct kvm_hypervisor_cpuid {
	u32 base;
	u32 limit;
};
#ifdef CONFIG_KVM_XEN
/* Xen HVM per vcpu emulation context */
struct kvm_vcpu_xen {
	u64 hypercall_rip;
	u32 current_runstate;
	u8 upcall_vector;
	struct gfn_to_pfn_cache vcpu_info_cache;
	struct gfn_to_pfn_cache vcpu_time_info_cache;
	struct gfn_to_pfn_cache runstate_cache;
	struct gfn_to_pfn_cache runstate2_cache;
	u64 last_steal;
	u64 runstate_entry_time;
	u64 runstate_times[4];
	unsigned long evtchn_pending_sel;
	u32 vcpu_id; /* The Xen / ACPI vCPU ID */
	u32 timer_virq;
	u64 timer_expires; /* In guest epoch */
	atomic_t timer_pending;
	struct hrtimer timer;
	int poll_evtchn;
	struct timer_list poll_timer;
	struct kvm_hypervisor_cpuid cpuid;
};
#endif
struct kvm_queued_exception {
	bool pending;
	bool injected;
	bool has_error_code;
	u8 vector;
	u32 error_code;
	unsigned long payload;
	bool has_payload;
};
struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];

	unsigned long cr0_guest_owned_bits;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr4_guest_rsvd_bits;

	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;

	u64 ia32_misc_enable_msr;

	bool at_instruction_boundary;
	bool tpr_access_reporting;
	bool xfd_no_write_intercept;

	u64 microcode_version;
	u64 arch_capabilities;
	u64 perf_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
	struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpstate" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu_guest guest_fpu;

	u64 guest_supported_xcr0;

	struct kvm_pio_request pio;
	unsigned sev_pio_count;

	u8 event_exit_inst_len;

	bool exception_from_userspace;

	/* Exceptions to be injected to the guest. */
	struct kvm_queued_exception exception;
	/* Exception VM-Exits to be synthesized to L1. */
	struct kvm_queued_exception exception_vmexit;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	struct kvm_cpuid_entry2 *cpuid_entries;
	struct kvm_hypervisor_cpuid kvm_cpuid;

	/*
	 * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
	 * when "struct kvm_vcpu_arch" is no longer defined in an
	 * arch/x86/include/asm header.  The max is mostly arbitrary, i.e.
	 * can be increased as necessary.
	 */
#define KVM_MAX_NR_GOVERNED_FEATURES BITS_PER_LONG
	/*
	 * Track whether or not the guest is allowed to use features that are
	 * governed by KVM, where "governed" means KVM needs to manage state
	 * and/or explicitly enable the feature in hardware.  Typically, but
	 * not always, governed features can be used by the guest if and only
	 * if both KVM and userspace want to expose the feature to the guest.
	 */
	struct {
		DECLARE_BITMAP(enabled, KVM_MAX_NR_GOVERNED_FEATURES);
	} governed_features;

	u64 reserved_gpa_bits;
	int maxphyaddr;
	/* emulate context */

	struct x86_emulate_ctxt *emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_pfn_cache pv_time;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u8 preempted;
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
	u64 l1_tsc_offset;
	u64 tsc_offset; /* current tsc offset */
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 l1_tsc_scaling_ratio;
	u64 tsc_scaling_ratio; /* current scaling ratio */
	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	/* Number of NMIs pending injection, not including hardware vNMIs. */
	unsigned int nmi_pending;
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;     /* SMI queued after currently running handler */
	u8 handling_intr_from_guest;

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;
	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;
	u64 *mci_ctl2_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;
#ifdef CONFIG_KVM_HYPERV
	bool hyperv_enabled;
	struct kvm_vcpu_hv *hyperv;
#endif
#ifdef CONFIG_KVM_XEN
	struct kvm_vcpu_xen xen;
#endif

	cpumask_var_t wbinvd_dirty_mask;
	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[ASYNC_PF_PER_VCPU];
		struct gfn_to_hva_cache data;
		u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
		u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
		u16 vec;
		u32 id;
		bool send_user_only;
		u32 host_apf_flags;
		bool delivery_as_pf_vmexit;
		bool pageready_pending;
	} apf;
	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;
	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* be preempted when it's in kernel-mode(cpl=0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* Host CPU on which VM-entry was most recently attempted */
	int last_vmentry_cpu;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;

	/* pv related cpuid info */
	struct {
		/*
		 * value of the eax register in the KVM_CPUID_FEATURES CPUID
		 * leaf.
		 */
		u32 features;

		/*
		 * indicates whether pv emulation should be disabled if features
		 * are not present in the guest's cpuid
		 */
		bool enforce;
	} pv_cpuid;

	/* Protected Guests */
	bool guest_state_protected;

	/*
	 * Set when PDPTS were loaded directly by the userspace without
	 * reading the guest memory
	 */
	bool pdptrs_from_userspace;
#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
#endif
};
struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_write_track;
};
/*
 * Track the mode of the optimized logical map, as the rules for decoding the
 * destination vary per mode.  Enabling the optimized logical map requires all
 * software-enabled local APICs to be in the same mode, each addressable APIC
 * to be mapped to only one MDA, and each MDA to map to at most one APIC.
 */
enum kvm_apic_logical_mode {
	/* All local APICs are software disabled. */
	KVM_APIC_MODE_SW_DISABLED,
	/* All software enabled local APICs in xAPIC cluster addressing mode. */
	KVM_APIC_MODE_XAPIC_CLUSTER,
	/* All software enabled local APICs in xAPIC flat addressing mode. */
	KVM_APIC_MODE_XAPIC_FLAT,
	/* All software enabled local APICs in x2APIC mode. */
	KVM_APIC_MODE_X2APIC,
	/*
	 * Optimized map disabled, e.g. not all local APICs in the same logical
	 * mode, same logical ID assigned to multiple APICs, etc.
	 */
	KVM_APIC_MODE_MAP_DISABLED,
};
struct kvm_apic_map {
	struct rcu_head rcu;
	enum kvm_apic_logical_mode logical_mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};
/* Hyper-V synthetic debugger (SynDbg)*/
struct kvm_hv_syndbg {
	struct {
		u64 control;
		u64 status;
		u64 send_page;
		u64 recv_page;
		u64 pending_page;
	} control;
	u64 options;
};
/* Current state of Hyper-V TSC page clocksource */
enum hv_tsc_page_status {
	/* TSC page was not set up or disabled */
	HV_TSC_PAGE_UNSET = 0,
	/* TSC page MSR was written by the guest, update pending */
	HV_TSC_PAGE_GUEST_CHANGED,
	/* TSC page update was triggered from the host side */
	HV_TSC_PAGE_HOST_CHANGED,
	/* TSC page was properly set up and is currently active */
	HV_TSC_PAGE_SET,
	/* TSC page was set up with an inaccessible GPA */
	HV_TSC_PAGE_BROKEN,
};
#ifdef CONFIG_KVM_HYPERV
/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;
	enum hv_tsc_page_status hv_tsc_page_status;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	struct ms_hyperv_tsc_page tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;
	u64 hv_invtsc_control;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	/*
	 * How many SynICs use 'AutoEOI' feature
	 * (protected by arch.apicv_update_lock)
	 */
	unsigned int synic_auto_eoi_used;

	struct kvm_hv_syndbg hv_syndbg;
};
#endif
struct msr_bitmap_range {
	u32 flags;
	u32 nmsrs;
	u32 base;
	unsigned long *bitmap;
};
#ifdef CONFIG_KVM_XEN
/* Xen emulation context */
struct kvm_xen {
	struct mutex xen_lock;
	u32 xen_version;
	bool long_mode;
	bool runstate_update_flag;
	u8 upcall_vector;
	struct gfn_to_pfn_cache shinfo_cache;
	struct idr evtchn_ports;
	unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
};
#endif
enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};
struct kvm_x86_msr_filter {
	u8 count;
	bool default_allow:1;
	struct msr_bitmap_range ranges[16];
};
struct kvm_x86_pmu_event_filter {
	__u32 action;
	__u32 nevents;
	__u32 fixed_counter_bitmap;
	__u32 flags;
	__u32 pad[4];
	__u64 events[];
};
enum kvm_apicv_inhibit {

	/********************************************************************/
	/* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
	/********************************************************************/

	/*
	 * APIC acceleration is disabled by a module parameter
	 * and/or not supported in hardware.
	 */
	APICV_INHIBIT_REASON_DISABLE,

	/*
	 * APIC acceleration is inhibited because AutoEOI feature is
	 * being used by a HyperV guest.
	 */
	APICV_INHIBIT_REASON_HYPERV,

	/*
	 * APIC acceleration is inhibited because the userspace didn't yet
	 * enable the kernel/split irqchip.
	 */
	APICV_INHIBIT_REASON_ABSENT,

	/* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
	 * (out of band, debug measure of blocking all interrupts on this vCPU)
	 * was enabled, to avoid AVIC/APICv bypassing it.
	 */
	APICV_INHIBIT_REASON_BLOCKIRQ,

	/*
	 * APICv is disabled because not all vCPUs have a 1:1 mapping between
	 * APIC ID and vCPU, _and_ KVM is not applying its x2APIC hotplug hack.
	 */
	APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED,

	/*
	 * For simplicity, the APIC acceleration is inhibited the
	 * first time either APIC ID or APIC base are changed by the guest
	 * from their reset values.
	 */
	APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
	APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,

	/******************************************************/
	/* INHIBITs that are relevant only to the AMD's AVIC. */
	/******************************************************/

	/*
	 * AVIC is inhibited on a vCPU because it runs a nested guest.
	 *
	 * This is needed because unlike APICv, the peers of this vCPU
	 * cannot use the doorbell mechanism to signal interrupts via AVIC when
	 * a vCPU runs nested.
	 */
	APICV_INHIBIT_REASON_NESTED,

	/*
	 * On SVM, the wait for the IRQ window is implemented with pending vIRQ,
	 * which cannot be injected when the AVIC is enabled, thus AVIC
	 * is inhibited while KVM waits for IRQ window.
	 */
	APICV_INHIBIT_REASON_IRQWIN,

	/*
	 * PIT (i8254) 're-inject' mode relies on EOI intercept,
	 * which AVIC doesn't support for edge triggered interrupts.
	 */
	APICV_INHIBIT_REASON_PIT_REINJ,

	/*
	 * AVIC is disabled because SEV doesn't support it.
	 */
	APICV_INHIBIT_REASON_SEV,

	/*
	 * AVIC is disabled because not all vCPUs with a valid LDR have a 1:1
	 * mapping between logical ID and vCPU.
	 */
	APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED,

	NR_APICV_INHIBIT_REASONS,
};
struct kvm_arch {
	unsigned long vm_type;
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	/*
	 * A list of kvm_mmu_page structs that, if zapped, could possibly be
	 * replaced by an NX huge page.  A shadow page is on this list if its
	 * existence disallows an NX huge page (nx_huge_page_disallowed is set)
	 * and there are no other conditions that prevent a huge page, e.g.
	 * the backing host page is huge, dirty logging is not enabled for its
	 * memslot, etc...  Note, zapping shadow pages on this list doesn't
	 * guarantee an NX huge page will be created in its stead, e.g. if the
	 * guest attempts to execute from the region then KVM obviously can't
	 * create an NX huge page (without hanging the guest).
	 */
	struct list_head possible_nx_huge_pages;
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
	struct kvm_page_track_notifier_head track_notifier_head;
#endif
	/*
	 * Protects marking pages unsync during page faults, as TDP MMU page
	 * faults only take mmu_lock for read.  For simplicity, the unsync
	 * pages lock is always taken when marking pages unsync regardless of
	 * whether mmu_lock is held for read or write.
	 */
	spinlock_t mmu_unsync_pages_lock;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map __rcu *apic_map;
	atomic_t apic_map_dirty;

	bool apic_access_memslot_enabled;
	bool apic_access_memslot_inhibited;

	/* Protects apicv_inhibit_reasons */
	struct rw_semaphore apicv_update_lock;
	unsigned long apicv_inhibit_reasons;
	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;

	/*
	 * This also protects nr_vcpus_matched_tsc which is read from a
	 * preemption-disabled region, so it must be a raw spinlock.
	 */
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 last_tsc_offset;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	u32 default_tsc_khz;

	seqcount_raw_spinlock_t pvclock_sc;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;
	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

#ifdef CONFIG_KVM_HYPERV
	struct kvm_hv hyperv;
#endif

#ifdef CONFIG_KVM_XEN
	struct kvm_xen xen;
#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;
	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	bool triple_fault_event;

	bool bus_lock_detection_enabled;
	bool enable_pmu;

	u32 notify_window;
	u32 notify_vmexit_flags;
	/*
	 * If exit_on_emulation_error is set, and the in-kernel instruction
	 * emulator fails to emulate an instruction, allow userspace
	 * the opportunity to look at it.
	 */
	bool exit_on_emulation_error;

	/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
	u32 user_space_msr_mask;
	struct kvm_x86_msr_filter __rcu *msr_filter;

	u32 hypercall_exit_enabled;
	/* Guest can access the SGX PROVISIONKEY. */
	bool sgx_provisioning_allowed;

	struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
	struct task_struct *nx_huge_page_recovery_thread;
#ifdef CONFIG_X86_64
	/* The number of TDP MMU pages across all roots. */
	atomic64_t tdp_mmu_pages;

	/*
	 * List of struct kvm_mmu_pages being used as roots.
	 * All struct kvm_mmu_pages in the list should have
	 * tdp_mmu_page set.
	 *
	 * For reads, this list is protected by:
	 *	the MMU lock in read mode + RCU or
	 *	the MMU lock in write mode
	 *
	 * For writes, this list is protected by tdp_mmu_pages_lock; see
	 * below for the details.
	 *
	 * Roots will remain in the list until their tdp_mmu_root_count
	 * drops to zero, at which point the thread that decremented the
	 * count to zero should remove the root from the list and clean
	 * it up, freeing the root after an RCU grace period.
	 */
	struct list_head tdp_mmu_roots;

	/*
	 * Protects accesses to the following fields when the MMU lock
	 * is held in read mode:
	 *  - tdp_mmu_roots (above)
	 *  - the link field of kvm_mmu_page structs used by the TDP MMU
	 *  - possible_nx_huge_pages;
	 *  - the possible_nx_huge_page_link field of kvm_mmu_page structs used
	 *    by the TDP MMU
	 * Because the lock is only taken within the MMU lock, strictly
	 * speaking it is redundant to acquire this lock when the thread
	 * holds the MMU lock in write mode.  However it often simplifies
	 * the code to do so.
	 */
	spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */
	/*
	 * If set, at least one shadow root has been allocated. This flag
	 * is used as one input when determining whether certain memslot
	 * related allocations are necessary.
	 */
	bool shadow_root_allocated;

#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
	spinlock_t hv_root_tdp_lock;
	struct hv_partition_assist_pg *hv_pa_pg;
#endif
	/*
	 * VM-scope maximum vCPU ID. Used to determine the size of structures
	 * that increase along with the maximum vCPU ID, in which case, using
	 * the global KVM_MAX_VCPU_IDS may lead to significant memory waste.
	 */
	u32 max_vcpu_ids;

	bool disable_nx_huge_pages;
	/*
	 * Memory caches used to allocate shadow pages when performing eager
	 * page splitting. No need for a shadowed_info_cache since eager page
	 * splitting only allocates direct shadow pages.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_shadow_page_cache;
	struct kvm_mmu_memory_cache split_page_header_cache;

	/*
	 * Memory cache used to allocate pte_list_desc structs while splitting
	 * huge pages. In the worst case, to split one huge page, 512
	 * pte_list_desc structs are needed to add each lower level leaf sptep
	 * to the rmap plus 1 to extend the parent_ptes rmap of the lower level
	 * page table.
	 *
	 * Protected by kvm->slots_lock.
	 */
#define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
	struct kvm_mmu_memory_cache split_desc_cache;
};
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 mmu_shadow_zapped;
	u64 mmu_pte_write;
	u64 mmu_pde_zapped;
	u64 mmu_flooded;
	u64 mmu_recycled;
	u64 mmu_cache_miss;
	u64 mmu_unsync;
	union {
		struct {
			atomic64_t pages_4k;
			atomic64_t pages_2m;
			atomic64_t pages_1g;
		};
		atomic64_t pages[KVM_NR_PAGE_SIZES];
	};
	u64 nx_lpage_splits;
	u64 max_mmu_page_hash_collisions;
	u64 max_mmu_rmap_size;
};
struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 pf_taken;
	u64 pf_fixed;
	u64 pf_emulate;
	u64 pf_spurious;
	u64 pf_fast;
	u64 pf_mmio_spte_created;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
	u64 nested_run;
	u64 directed_yield_attempted;
	u64 directed_yield_successful;
	u64 preemption_reported;
	u64 preemption_other;
	u64 guest_mode;
	u64 notify_window_exits;
};
struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};
struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};
static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}
struct kvm_x86_ops {
	const char *name;

	int (*check_processor_compatibility)(void);

	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*hardware_unsetup)(void);
	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);

	unsigned int vm_size;
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	int (*vcpu_precreate)(struct kvm *kvm);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	bool (*get_if_flag)(struct kvm_vcpu *vcpu);

	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
#if IS_ENABLED(CONFIG_HYPERV)
	int  (*flush_remote_tlbs)(struct kvm *kvm);
	int  (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
					gfn_t nr_pages);
#endif

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	/*
	 * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
	 * does not need to flush GPA->HPA mappings.
	 */
	void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);

	int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
	enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu,
			   enum exit_fastpath_completion exit_fastpath);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
	void (*inject_nmi)(struct kvm_vcpu *vcpu);
	void (*inject_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	/* Whether or not a virtual NMI is pending in hardware. */
	bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu);
	/*
	 * Attempt to pend a virtual NMI in hardware.  Returns %true on success
	 * to allow using static_call_ret0 as the fallback.
	 */
	bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
	const unsigned long required_apicv_inhibits;
	bool allow_apicv_in_x2apic_without_x2apic_virtualization;
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
	void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
				  int trig_mode, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level);

	bool (*has_wbinvd_exit)(void);

	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu);
	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);

	/*
	 * Retrieve somewhat arbitrary exit information.  Intended to
	 * be used only from within tracepoints or error paths.
	 */
	void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
			      u64 *info1, u64 *info2,
			      u32 *exit_int_info, u32 *exit_int_info_err_code);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);

	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

	/*
	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
	 * value indicates CPU dirty logging is unsupported or disabled.
	 */
	int cpu_dirty_log_size;
	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);

	const struct kvm_x86_nested_ops *nested_ops;

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*pi_start_assignment)(struct kvm *kvm);
	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_SMM
	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
	int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
	void (*enable_smi_window)(struct kvm_vcpu *vcpu);
#endif

	int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
	int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
	void (*guest_memory_reclaimed)(struct kvm *kvm);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
					 void *insn, int insn_len);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);

	void (*migrate_timers)(struct kvm_vcpu *vcpu);
	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);

	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);

	/*
	 * Returns vCPU specific APICv inhibit reasons
	 */
	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);

	gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
};
struct kvm_x86_nested_ops {
	void (*leave_nested)(struct kvm_vcpu *vcpu);
	bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
				    u32 error_code);
	int (*check_events)(struct kvm_vcpu *vcpu);
	bool (*has_events)(struct kvm_vcpu *vcpu);
	void (*triple_fault)(struct kvm_vcpu *vcpu);
	int (*get_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 unsigned user_data_size);
	int (*set_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 struct kvm_nested_state *kvm_state);
	bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
#ifdef CONFIG_KVM_HYPERV
	int (*enable_evmcs)(struct kvm_vcpu *vcpu,
			    uint16_t *vmcs_version);
	uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
	void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu);
#endif
};
struct kvm_x86_init_ops {
	int (*hardware_setup)(void);
	unsigned int (*handle_intel_pt_intr)(void);

	struct kvm_x86_ops *runtime_ops;
	struct kvm_pmu_ops *pmu_ops;
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};
extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern bool __read_mostly enable_apicv;
extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
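/*
 * Illustrative sketch (not part of the original header): <asm/kvm-x86-ops.h>
 * is an x-macro list, so the three KVM_X86_OP* defines above expand it into
 * one DECLARE_STATIC_CALL() per vendor hook.  Common code then invokes the
 * generated static calls instead of indirect branches, e.g.
 *
 *	static_call(kvm_x86_vcpu_load)(vcpu, cpu);
 *
 * which is patched at runtime to the VMX or SVM implementation.
 */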
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
void kvm_x86_vendor_exit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

#define __KVM_HAVE_ARCH_VM_FREE
void kvm_arch_free_vm(struct kvm *kvm);
#if IS_ENABLED(CONFIG_HYPERV)
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	if (kvm_x86_ops.flush_remote_tlbs &&
	    !static_call(kvm_x86_flush_remote_tlbs)(kvm))
		return 0;
	else
		return -EOPNOTSUPP;
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
						   u64 nr_pages)
{
	if (!kvm_x86_ops.flush_remote_tlbs_range)
		return -EOPNOTSUPP;

	return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
}
#endif /* CONFIG_HYPERV */
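/*
 * Illustrative note (not part of the original header): a non-zero return
 * from either helper tells common KVM code that the hypervisor-assisted
 * remote flush is unavailable, and the caller is expected to fall back to
 * the generic path, e.g. broadcasting TLB-flush requests to all vCPUs.
 */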
#define kvm_arch_pmi_in_guest(vcpu) \
	((vcpu) && (vcpu)->arch.handling_intr_from_guest)
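/*
 * Illustrative sketch (not part of the original header): perf's NMI/PMI
 * paths use this to decide whether a sample that fired while KVM was
 * handling an interrupt should be attributed to the guest, roughly:
 *
 *	if (kvm_arch_pmi_in_guest(kvm_get_running_vcpu()))
 *		... attribute the PMI to the guest ...
 *
 * where kvm_get_running_vcpu() is the generic KVM per-CPU helper.
 */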
void __init kvm_mmu_x86_module_init(void);
int kvm_mmu_vendor_module_init(void);
void kvm_mmu_vendor_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);

void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
					    struct kvm_memory_slot *slot);

void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level);
void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
				       const struct kvm_memory_slot *memslot,
				       int target_level);
void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  u64 start, u64 end,
				  int target_level);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);
extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops.skip_emulated_instruction() implementations if
 *		   EMULTYPE_COMPLETE_USER_EXIT is not set.
 *
 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
 *			     retry native execution under certain conditions.
 *			     Can only be set in conjunction with EMULTYPE_PF.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses the EmulateOnUD restriction despite
 *			     emulating due to an intercepted #UD (see
 *			     EMULTYPE_TRAP_UD).  Used to test the full emulator
 *			     from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 *
 * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
 *		 case the CR2/GPA value passed on the stack is valid.
 *
 * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update
 *				 interruptibility state and inject single-step
 *				 #DBs after skipping an instruction (after
 *				 completing userspace I/O).
 *
 * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
 *			     is attempting to write a gfn that contains one or
 *			     more of the PTEs used to translate the write itself,
 *			     and the owning page table is being shadowed by KVM.
 *			     If emulation of the faulting instruction fails and
 *			     this flag is set, KVM will exit to userspace instead
 *			     of retrying emulation as KVM cannot make forward
 *			     progress.
 *
 *			     If emulation fails for a write to guest page tables,
 *			     KVM unprotects (zaps) the shadow page for the target
 *			     gfn and resumes the guest to retry the non-emulatable
 *			     instruction (on hardware).  Unprotecting the gfn
 *			     doesn't allow forward progress for a self-changing
 *			     access because doing so also zaps the translation for
 *			     the gfn, i.e. retrying the instruction will hit a
 *			     !PRESENT fault, which results in a new shadow page
 *			     and sends KVM back to square one.
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY_PF	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
#define EMULTYPE_PF		    (1 << 6)
#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
#define EMULTYPE_WRITE_PF_TO_SP	    (1 << 8)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);
void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
					  u64 *data, u8 ndata);
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
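/*
 * Illustrative sketch (not a declaration in this header): a #UD intercept
 * handler would typically feed the emulator as below, upgrading to
 * EMULTYPE_TRAP_UD_FORCED only when the opt-in "force emulation" prefix is
 * found ahead of the faulting instruction.  The helper name is hypothetical:
 *
 *	int emul_type = EMULTYPE_TRAP_UD;
 *
 *	if (force_emulation_prefix_found(vcpu))	// hypothetical helper
 *		emul_type = EMULTYPE_TRAP_UD_FORCED;
 *	return kvm_emulate_instruction(vcpu, emul_type);
 */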
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
		  bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
int kvm_emulate_invd(struct kvm_vcpu *vcpu);
int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
int kvm_emulate_monitor(struct kvm_vcpu *vcpu);

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
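/*
 * Sketch of the intended calling convention (mirroring how vendor I/O-exit
 * handlers use this API): decode size, port and direction from the exit
 * info, then let kvm_fast_pio() perform the access and skip the instruction:
 *
 *	return kvm_fast_pio(vcpu, size, port, in);
 *
 * The return value follows the usual exit-handler convention: non-zero to
 * resume the guest, zero to complete the access in userspace.
 */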
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu);
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0,
		      unsigned long cr0);
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4,
		      unsigned long cr4);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr,
			     u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	/* The line is asserted while any source still has its bit set. */
	return !!(*irq_state);
}
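/*
 * For example (sketch, mirroring how the in-kernel PIC/IOAPIC track shared
 * level-triggered lines): each source asserts or deasserts only its own bit,
 * and the effective line level is the OR of all sources:
 *
 *	level = __kvm_irq_line_state(&pic->irq_states[irq],
 *				     irq_source_id, level);
 */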
int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id,
		    int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);

void kvm_update_dr7(struct kvm_vcpu *vcpu);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
			ulong roots_to_free);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);
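/*
 * Sketch of the expected use of the gva->gpa helpers (assuming the caller
 * reports translation failures back to the guest, and that failure is
 * signalled by an INVALID_GPA return with *exception filled in):
 *
 *	struct x86_exception exception;
 *	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &exception);
 *
 *	if (gpa == INVALID_GPA)
 *		kvm_inject_emulated_page_fault(vcpu, &exception);
 */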
bool kvm_apicv_activated(struct kvm *kvm);
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set);
void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set);
static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
					 enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
}

static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
					   enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
}
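/*
 * Usage sketch: callers pair these helpers with a reason from
 * enum kvm_apicv_inhibit, e.g. the in-kernel PIT inhibits APICv while
 * interrupt reinjection is active and lifts the inhibit when it stops:
 *
 *	kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
 *	...
 *	kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
 */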
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			     u64 addr, unsigned long roots);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level);
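/*
 * Sketch of vendor-module usage (the values are illustrative, not taken from
 * any particular implementation): an EPT-like setup with TDP enabled, no
 * forced root level, 4-level paging and 2MiB huge pages would configure the
 * common MMU roughly as:
 *
 *	kvm_configure_mmu(enable_ept, 0, 4, PG_LEVEL_2M);
 */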
#ifdef CONFIG_KVM_PRIVATE_MEM
#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.vm_type != KVM_X86_DEFAULT_VM)
#else
#define kvm_arch_has_private_mem(kvm) false
#endif
static inline u16 kvm_read_ldt(void)
{
	u16 ldt;

	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif
static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
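/*
 * Worked out, RMODE_TSS_SIZE is 0x68 + 256/8 + 65536/8 + 1 =
 * 104 + 32 + 8192 + 1 = 8329 bytes: the base TSS, the VM86
 * interrupt-redirection bitmap, the full I/O-permission bitmap, and the
 * mandatory trailing 0xff terminator byte required after the IOPB.
 */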
enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};
#define HF_GUEST_MASK		(1 << 0) /* VCPU is in guest-mode */

#ifdef CONFIG_KVM_SMM
#define HF_SMM_MASK		(1 << 1)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 2)

# define KVM_MAX_NR_ADDRESS_SPACES	2
/* SMM is currently unsupported for guests with private memory. */
# define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2)
# define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
#else
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
#endif
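/*
 * Net effect (a summary, not additional API): with CONFIG_KVM_SMM, a vCPU
 * with HF_SMM_MASK set in vcpu->arch.hflags resolves to memslot address
 * space 1 while all other vCPUs use address space 0, giving SMM its own
 * view of guest memory; without CONFIG_KVM_SMM every lookup collapses to
 * address space 0.
 */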
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);
int kvm_add_user_return_msr(u32 msr);
int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);

static inline bool kvm_is_supported_user_return_msr(u32 msr)
{
	return kvm_find_user_return_msr(msr) >= 0;
}
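/*
 * Typical flow (a sketch of vendor-module usage; "index" is an assumed
 * caller-side variable): register the MSR once at module init, cache its
 * slot, then update the guest value around vCPU runs.  The mask limits
 * which bits are written:
 *
 *	// setup
 *	index = kvm_add_user_return_msr(MSR_TSC_AUX);
 *
 *	// vCPU load path
 *	kvm_set_user_return_msr(index, guest_val, -1ull);
 */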
u64 kvm_scale_tsc(u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset,
			       u64 l2_multiplier);
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
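/*
 * Sketch of the arithmetic behind the helpers above, with multipliers in a
 * fixed-point format whose fractional width (frac_bits) is vendor defined:
 *
 *	L1 TSC = (host TSC * l1_multiplier >> frac_bits) + l1_offset
 *	L2 TSC = (L1 TSC   * l2_multiplier >> frac_bits) + l2_offset
 *
 * Expanding the L2 expression gives the combined values computed by
 * kvm_calc_nested_tsc_offset() and kvm_calc_nested_tsc_multiplier():
 * (l1_offset * l2_multiplier >> frac_bits) + l2_offset, and
 * (l1_multiplier * l2_multiplier) >> frac_bits, respectively.
 */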
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap);
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
				     u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);
static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == APIC_DM_FIXED ||
		irq->delivery_mode == APIC_DM_LOWEST);
}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
}
static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1); /* shouldn't be reached without a local APIC */
	return BAD_APICID;
#endif
}
int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
#define KVM_CLOCK_VALID_FLAGS						\
	(KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)
#define KVM_X86_VALID_QUIRKS			\
	(KVM_X86_QUIRK_LINT0_REENABLED |	\
	 KVM_X86_QUIRK_CD_NW_CLEARED |		\
	 KVM_X86_QUIRK_LAPIC_MMIO_HOLE |	\
	 KVM_X86_QUIRK_OUT_7E_INC_RIP |		\
	 KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT |	\
	 KVM_X86_QUIRK_FIX_HYPERCALL_INSN |	\
	 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)
/*
 * KVM previously used a u32 field in kvm_run to indicate the hypercall was
 * initiated from long mode.  KVM now sets bit 0 to indicate long mode, but
 * the remaining 31 bits of that original u32 field (bits 31:1, i.e. mask
 * 0xfffffffe) must be zero to preserve ABI.
 */
#define KVM_EXIT_HYPERCALL_MBZ		GENMASK_ULL(31, 1)
#endif /* _ASM_X86_KVM_HOST_H */