/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H

/*
 * KVM x86 specific structures and definitions
 *
 */

#include <linux/types.h>
#include <linux/ioctl.h>

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_DIRTY_LOG_PAGE_OFFSET 64

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20

/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
#define __KVM_HAVE_READONLY_MEM

/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256

struct kvm_memory_alias {
        __u32 slot;  /* this has a different namespace than memory slots */
        __u32 flags;
        __u64 guest_phys_addr;
        __u64 memory_size;
        __u64 target_phys_addr;
};

/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
        __u8 last_irr;  /* edge detection */
        __u8 irr;               /* interrupt request register */
        __u8 imr;               /* interrupt mask register */
        __u8 isr;               /* interrupt service register */
        __u8 priority_add;      /* highest irq priority */
        __u8 irq_base;
        __u8 read_reg_select;
        __u8 poll;
        __u8 special_mask;
        __u8 init_state;
        __u8 auto_eoi;
        __u8 rotate_on_auto_eoi;
        __u8 special_fully_nested_mode;
        __u8 init4;             /* true if 4 byte init */
        __u8 elcr;              /* PIIX edge/trigger selection */
        __u8 elcr_mask;
};

#define KVM_IOAPIC_NUM_PINS 24
struct kvm_ioapic_state {
        __u64 base_address;
        __u32 ioregsel;
        __u32 id;
        __u32 irr;
        __u32 pad;
        union {
                __u64 bits;
                struct {
                        __u8 vector;
                        __u8 delivery_mode:3;
                        __u8 dest_mode:1;
                        __u8 delivery_status:1;
                        __u8 polarity:1;
                        __u8 remote_irr:1;
                        __u8 trig_mode:1;
                        __u8 mask:1;
                        __u8 reserve:7;
                        __u8 reserved[4];
                        __u8 dest_id;
                } fields;
        } redirtbl[KVM_IOAPIC_NUM_PINS];
};

#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE  1
#define KVM_IRQCHIP_IOAPIC     2
#define KVM_NR_IRQCHIPS        3
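
/*
 * Illustrative userspace sketch (not part of this header): reading the
 * master PIC state with KVM_GET_IRQCHIP.  It assumes an in-kernel irqchip
 * was created with KVM_CREATE_IRQCHIP, that vm_fd is an already-open VM
 * file descriptor, and that struct kvm_irqchip comes from <linux/kvm.h>.
 *
 *      struct kvm_irqchip chip = { .chip_id = KVM_IRQCHIP_PIC_MASTER };
 *
 *      if (ioctl(vm_fd, KVM_GET_IRQCHIP, &chip) == 0)
 *              printf("master PIC irr=%#x imr=%#x\n",
 *                     chip.chip.pic.irr, chip.chip.pic.imr);
 */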

#define KVM_RUN_X86_SMM      (1 << 0)
#define KVM_RUN_X86_BUS_LOCK (1 << 1)

/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
        /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
        __u64 rax, rbx, rcx, rdx;
        __u64 rsi, rdi, rsp, rbp;
        __u64 r8,  r9,  r10, r11;
        __u64 r12, r13, r14, r15;
        __u64 rip, rflags;
};

/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
#define KVM_APIC_REG_SIZE 0x400
struct kvm_lapic_state {
        char regs[KVM_APIC_REG_SIZE];
};

struct kvm_segment {
        __u64 base;
        __u32 limit;
        __u16 selector;
        __u8  type;
        __u8  present, dpl, db, s, l, g, avl;
        __u8  unusable;
        __u8  padding;
};

struct kvm_dtable {
        __u64 base;
        __u16 limit;
        __u16 padding[3];
};


/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
        /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
        struct kvm_segment cs, ds, es, fs, gs, ss;
        struct kvm_segment tr, ldt;
        struct kvm_dtable gdt, idt;
        __u64 cr0, cr2, cr3, cr4, cr8;
        __u64 efer;
        __u64 apic_base;
        __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};

struct kvm_sregs2 {
        /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
        struct kvm_segment cs, ds, es, fs, gs, ss;
        struct kvm_segment tr, ldt;
        struct kvm_dtable gdt, idt;
        __u64 cr0, cr2, cr3, cr4, cr8;
        __u64 efer;
        __u64 apic_base;
        __u64 flags;
        __u64 pdptrs[4];
};
#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1

/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
        __u8  fpr[8][16];
        __u16 fcw;
        __u16 fsw;
        __u8  ftwx;  /* in fxsave format */
        __u8  pad1;
        __u16 last_opcode;
        __u64 last_ip;
        __u64 last_dp;
        __u8  xmm[16][16];
        __u32 mxcsr;
        __u32 pad2;
};

struct kvm_msr_entry {
        __u32 index;
        __u32 reserved;
        __u64 data;
};

/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
        __u32 nmsrs; /* number of msrs in entries */
        __u32 pad;

        struct kvm_msr_entry entries[];
};
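
/*
 * Illustrative userspace sketch (not part of this header): reading one MSR
 * with KVM_GET_MSRS.  It assumes vcpu_fd is an already-open vCPU file
 * descriptor; 0x10 is MSR_IA32_TSC.  KVM_GET_MSRS returns the number of
 * MSRs actually read.
 *
 *      struct kvm_msrs *msrs = calloc(1, sizeof(*msrs) +
 *                                        sizeof(struct kvm_msr_entry));
 *
 *      msrs->nmsrs = 1;
 *      msrs->entries[0].index = 0x10;
 *      if (ioctl(vcpu_fd, KVM_GET_MSRS, msrs) == 1)
 *              printf("TSC = %llu\n", msrs->entries[0].data);
 *      free(msrs);
 */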

/* for KVM_GET_MSR_INDEX_LIST */
struct kvm_msr_list {
        __u32 nmsrs; /* number of msrs in entries */
        __u32 indices[];
};

/* Maximum size of any access bitmap in bytes */
#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600

/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range {
#define KVM_MSR_FILTER_READ  (1 << 0)
#define KVM_MSR_FILTER_WRITE (1 << 1)
        __u32 flags;
        __u32 nmsrs; /* number of msrs in bitmap */
        __u32 base;  /* MSR index the bitmap starts at */
        __u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */
};

#define KVM_MSR_FILTER_MAX_RANGES 16
struct kvm_msr_filter {
#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
#define KVM_MSR_FILTER_DEFAULT_DENY  (1 << 0)
        __u32 flags;
        struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
};
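
/*
 * Illustrative userspace sketch (not part of this header): denying guest
 * writes to a block of MSRs with KVM_X86_SET_MSR_FILTER (a VM ioctl,
 * available when KVM_CAP_X86_MSR_FILTER is supported).  Each bit in the
 * bitmap covers one MSR starting at 'base'; an all-zero bitmap in a
 * KVM_MSR_FILTER_WRITE range denies every write in that range.  vm_fd is
 * assumed to be an already-open VM file descriptor.
 *
 *      __u8 bitmap[KVM_MSR_FILTER_MAX_BITMAP_SIZE] = {};
 *      struct kvm_msr_filter filter = {
 *              .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
 *              .ranges[0] = {
 *                      .flags  = KVM_MSR_FILTER_WRITE,
 *                      .base   = 0xc0000000,
 *                      .nmsrs  = 8 * sizeof(bitmap),
 *                      .bitmap = bitmap,
 *              },
 *      };
 *
 *      ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
 */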

struct kvm_cpuid_entry {
        __u32 function;
        __u32 eax;
        __u32 ebx;
        __u32 ecx;
        __u32 edx;
        __u32 padding;
};

/* for KVM_SET_CPUID */
struct kvm_cpuid {
        __u32 nent;
        __u32 padding;
        struct kvm_cpuid_entry entries[];
};

struct kvm_cpuid_entry2 {
        __u32 function;
        __u32 index;
        __u32 flags;
        __u32 eax;
        __u32 ebx;
        __u32 ecx;
        __u32 edx;
        __u32 padding[3];
};

#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX (1 << 0)
#define KVM_CPUID_FLAG_STATEFUL_FUNC    (1 << 1)
#define KVM_CPUID_FLAG_STATE_READ_NEXT  (1 << 2)

/* for KVM_SET_CPUID2 */
struct kvm_cpuid2 {
        __u32 nent;
        __u32 padding;
        struct kvm_cpuid_entry2 entries[];
};
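
/*
 * Illustrative userspace sketch (not part of this header): copying the
 * host-supported CPUID leaves into a vCPU.  KVM_GET_SUPPORTED_CPUID is a
 * system ioctl (issued on the /dev/kvm fd) and KVM_SET_CPUID2 a vCPU ioctl;
 * kvm_fd and vcpu_fd are assumed to be open already, and 128 entries is
 * only an example size (KVM returns E2BIG if it is too small).
 *
 *      int nent = 128;
 *      struct kvm_cpuid2 *cpuid = calloc(1, sizeof(*cpuid) +
 *                                        nent * sizeof(struct kvm_cpuid_entry2));
 *
 *      cpuid->nent = nent;
 *      if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
 *              ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 *      free(cpuid);
 */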

/* for KVM_GET_PIT and KVM_SET_PIT */
struct kvm_pit_channel_state {
        __u32 count; /* can be 65536 */
        __u16 latched_count;
        __u8 count_latched;
        __u8 status_latched;
        __u8 status;
        __u8 read_state;
        __u8 write_state;
        __u8 write_latch;
        __u8 rw_mode;
        __u8 mode;
        __u8 bcd;
        __u8 gate;
        __s64 count_load_time;
};

struct kvm_debug_exit_arch {
        __u32 exception;
        __u32 pad;
        __u64 pc;
        __u64 dr6;
        __u64 dr7;
};

#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
#define KVM_GUESTDBG_BLOCKIRQ  0x00100000

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
        __u64 debugreg[8];
};

struct kvm_pit_state {
        struct kvm_pit_channel_state channels[3];
};

#define KVM_PIT_FLAGS_HPET_LEGACY     0x00000001
#define KVM_PIT_FLAGS_SPEAKER_DATA_ON 0x00000002

struct kvm_pit_state2 {
        struct kvm_pit_channel_state channels[3];
        __u32 flags;
        __u32 reserved[9];
};

struct kvm_reinject_control {
        __u8 pit_reinject;
        __u8 reserved[31];
};

/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING  0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR  0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW       0x00000004
#define KVM_VCPUEVENT_VALID_SMM          0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD      0x00000010
#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT 0x00000020

/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
#define KVM_X86_SHADOW_INT_STI    0x02

/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
        struct {
                __u8 injected;
                __u8 nr;
                __u8 has_error_code;
                __u8 pending;
                __u32 error_code;
        } exception;
        struct {
                __u8 injected;
                __u8 nr;
                __u8 soft;
                __u8 shadow;
        } interrupt;
        struct {
                __u8 injected;
                __u8 pending;
                __u8 masked;
                __u8 pad;
        } nmi;
        __u32 sipi_vector;
        __u32 flags;
        struct {
                __u8 smm;
                __u8 pending;
                __u8 smm_inside_nmi;
                __u8 latched_init;
        } smi;
        struct {
                __u8 pending;
        } triple_fault;
        __u8 reserved[26];
        __u8 exception_has_payload;
        __u64 exception_payload;
};

/* for KVM_GET/SET_DEBUGREGS */
struct kvm_debugregs {
        __u64 db[4];
        __u64 dr6;
        __u64 dr7;
        __u64 flags;
        __u64 reserved[9];
};

/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */
struct kvm_xsave {
        /*
         * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes
         * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
         * respectively, when invoked on the vm file descriptor.
         *
         * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
         * will always be at least 4096. Currently, it is only greater
         * than 4096 if a dynamic feature has been enabled with
         * ``arch_prctl()``, but this may change in the future.
         *
         * The offsets of the state save areas in struct kvm_xsave follow
         * the contents of CPUID leaf 0xD on the host.
         */
        __u32 region[1024];
        __u32 extra[];
};
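
/*
 * Illustrative userspace sketch (not part of this header): allocating a
 * buffer large enough for KVM_GET_XSAVE2 as described in the comment above.
 * vm_fd and vcpu_fd are assumed to be open, and KVM_CAP_XSAVE2 is assumed
 * to be supported.
 *
 *      int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
 *      struct kvm_xsave *xsave;
 *
 *      if (size < (int)sizeof(*xsave))
 *              size = sizeof(*xsave);
 *      xsave = calloc(1, size);
 *      ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave);
 *      ...
 *      ioctl(vcpu_fd, KVM_SET_XSAVE, xsave);
 *      free(xsave);
 */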

#define KVM_MAX_XCRS 16

struct kvm_xcr {
        __u32 xcr;
        __u32 reserved;
        __u64 value;
};

struct kvm_xcrs {
        __u32 nr_xcrs;
        __u32 flags;
        struct kvm_xcr xcrs[KVM_MAX_XCRS];
        __u64 padding[16];
};

#define KVM_SYNC_X86_REGS   (1UL << 0)
#define KVM_SYNC_X86_SREGS  (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)

#define KVM_SYNC_X86_VALID_FIELDS \
        (KVM_SYNC_X86_REGS| \
         KVM_SYNC_X86_SREGS| \
         KVM_SYNC_X86_EVENTS)

/* kvm_sync_regs struct included by kvm_run struct */
struct kvm_sync_regs {
        /* Members of this structure are potentially malicious.
         * Care must be taken by code reading, esp. interpreting,
         * data fields from them inside KVM to prevent TOCTOU and
         * double-fetch types of vulnerabilities.
         */
        struct kvm_regs regs;
        struct kvm_sregs sregs;
        struct kvm_vcpu_events events;
};
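
/*
 * Illustrative userspace sketch (not part of this header): using the shared
 * kvm_run area instead of separate KVM_GET_REGS/KVM_SET_REGS calls.  This
 * requires KVM_CAP_SYNC_REGS; 'run' is assumed to be the mmap'ed struct
 * kvm_run of an open vCPU fd.
 *
 *      run->kvm_valid_regs = KVM_SYNC_X86_REGS;
 *      ioctl(vcpu_fd, KVM_RUN, 0);
 *      printf("rip = %llx\n", run->s.regs.regs.rip);
 *
 *      run->s.regs.regs.rax = 0;
 *      run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
 */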

#define KVM_X86_QUIRK_LINT0_REENABLED       (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED         (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE       (1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP        (1 << 3)
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT  (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN    (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)

#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1

#define KVM_STATE_NESTED_GUEST_MODE  0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS       0x00000004
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_GIF_SET     0x00000100

#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON      0x00000002

#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000

#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000

#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001

/* attributes for system fd (group 0) */
#define KVM_X86_XCOMP_GUEST_SUPP 0

struct kvm_vmx_nested_state_data {
        __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
        __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
};

struct kvm_vmx_nested_state_hdr {
        __u64 vmxon_pa;
        __u64 vmcs12_pa;

        struct {
                __u16 flags;
        } smm;

        __u16 pad;

        __u32 flags;
        __u64 preemption_timer_deadline;
};

struct kvm_svm_nested_state_data {
        /* Save area only used if KVM_STATE_NESTED_RUN_PENDING. */
        __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
};

struct kvm_svm_nested_state_hdr {
        __u64 vmcb_pa;
};

/* for KVM_CAP_NESTED_STATE */
struct kvm_nested_state {
        __u16 flags;
        __u16 format;
        __u32 size;

        union {
                struct kvm_vmx_nested_state_hdr vmx;
                struct kvm_svm_nested_state_hdr svm;

                /* Pad the header to 128 bytes. */
                __u8 pad[120];
        } hdr;

        /*
         * Define the data region as 0 bytes to preserve backwards
         * compatibility with the old definition of kvm_nested_state and
         * avoid changing the KVM_{GET,SET}_NESTED_STATE ioctl values.
         */
        union {
                struct kvm_vmx_nested_state_data vmx[0];
                struct kvm_svm_nested_state_data svm[0];
        } data;
};
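
/*
 * Illustrative userspace sketch (not part of this header): saving and
 * restoring nested virtualization state.  KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE)
 * reports the maximum buffer size, and 'size' must be filled in by userspace
 * before KVM_GET_NESTED_STATE.  kvm_fd and vcpu_fd are assumed to be open
 * and the capability to be supported.
 *
 *      int max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
 *      struct kvm_nested_state *state = calloc(1, max);
 *
 *      state->size = max;
 *      if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) == 0)
 *              ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
 *      free(state);
 */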

/* for KVM_CAP_PMU_EVENT_FILTER */
struct kvm_pmu_event_filter {
        __u32 action;
        __u32 nevents;
        __u32 fixed_counter_bitmap;
        __u32 flags;
        __u32 pad[4];
        __u64 events[];
};

#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
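
/*
 * Illustrative userspace sketch (not part of this header): blocking a single
 * PMU event with KVM_SET_PMU_EVENT_FILTER (a VM ioctl, available when
 * KVM_CAP_PMU_EVENT_FILTER is supported).  The encoding 0x003c (event
 * select 0x3c, unit mask 0) is only an example value; vm_fd is assumed to
 * be an already-open VM file descriptor.
 *
 *      struct kvm_pmu_event_filter *f = calloc(1, sizeof(*f) + sizeof(__u64));
 *
 *      f->action = KVM_PMU_EVENT_DENY;
 *      f->nevents = 1;
 *      f->events[0] = 0x003c;
 *      ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
 *      free(f);
 */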

/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
#define   KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
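
/*
 * Illustrative userspace sketch (not part of this header): reading the TSC
 * offset through the vCPU device-attribute interface.  This requires
 * KVM_CAP_VCPU_ATTRIBUTES; struct kvm_device_attr comes from <linux/kvm.h>
 * and vcpu_fd is assumed to be an already-open vCPU file descriptor.
 *
 *      __u64 offset = 0;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_VCPU_TSC_CTRL,
 *              .attr  = KVM_VCPU_TSC_OFFSET,
 *              .addr  = (__u64)(unsigned long)&offset,
 *      };
 *
 *      if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *              ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);
 */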

#endif /* _ASM_X86_KVM_H */