KVM: VMX: Move eVMCS code to dedicated files
arch/x86/kvm/vmx/evmcs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_EVMCS_H
#define __KVM_X86_VMX_EVMCS_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "vmcs.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 * POSTED_INTR_NV = 0x00000002,
 * GUEST_INTR_STATUS = 0x00000810,
 * APIC_ACCESS_ADDR = 0x00002014,
 * POSTED_INTR_DESC_ADDR = 0x00002016,
 * EOI_EXIT_BITMAP0 = 0x0000201c,
 * EOI_EXIT_BITMAP1 = 0x0000201e,
 * EOI_EXIT_BITMAP2 = 0x00002020,
 * EOI_EXIT_BITMAP3 = 0x00002022,
 * GUEST_PML_INDEX = 0x00000812,
 * PML_ADDRESS = 0x0000200e,
 * VM_FUNCTION_CONTROL = 0x00002018,
 * EPTP_LIST_ADDRESS = 0x00002024,
 * VMREAD_BITMAP = 0x00002026,
 * VMWRITE_BITMAP = 0x00002028,
 *
 * TSC_MULTIPLIER = 0x00002032,
 * PLE_GAP = 0x00004020,
 * PLE_WINDOW = 0x00004022,
 * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
 * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
 * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
 *
 * Currently unsupported in KVM:
 * GUEST_IA32_RTIT_CTL = 0x00002814,
 */
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
                                    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_2NDEXEC                              \
        (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |                 \
         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |              \
         SECONDARY_EXEC_APIC_REGISTER_VIRT |                    \
         SECONDARY_EXEC_ENABLE_PML |                            \
         SECONDARY_EXEC_ENABLE_VMFUNC |                         \
         SECONDARY_EXEC_SHADOW_VMCS |                           \
         SECONDARY_EXEC_TSC_SCALING |                           \
         SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)

#if IS_ENABLED(CONFIG_HYPERV)

struct evmcs_field {
        u16 offset;
        u16 clean_field;
};

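/*
 * Lookup table, defined in vmx/evmcs.c, mapping each VMCS field encoding to
 * its byte offset inside struct hv_enlightened_vmcs and to the "clean field"
 * group that must be marked dirty when the field changes.
 */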
extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
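/*
 * Map a VMCS field encoding to its offset in the enlightened VMCS.  The
 * lookup table is indexed by rotating the 16-bit field encoding left by 6;
 * unsupported fields warn once and return -ENOENT, turning the accessors
 * below into no-ops for those fields.
 */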
static __always_inline int get_evmcs_offset(unsigned long field,
                                            u16 *clean_field)
{
        unsigned int index = ROL16(field, 6);
        const struct evmcs_field *evmcs_field;

        if (unlikely(index >= nr_evmcs_1_fields)) {
                WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
                          field);
                return -ENOENT;
        }

        evmcs_field = &vmcs_field_to_evmcs_1[index];

        if (clean_field)
                *clean_field = evmcs_field->clean_field;

        return evmcs_field->offset;
}

#undef ROL16

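/*
 * Accessors for the currently loaded enlightened VMCS.  Writes also clear
 * the corresponding bit in hv_clean_fields so Hyper-V knows that field
 * group must be reloaded on the next VM entry.  Callers are expected to
 * dispatch on the enable_evmcs static key, e.g. (illustrative only):
 *
 *	if (static_branch_unlikely(&enable_evmcs))
 *		evmcs_write64(field, value);
 *	else
 *		__vmcs_writel(field, value);
 */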
static inline void evmcs_write64(unsigned long field, u64 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u64 *)((char *)current_evmcs + offset) = value;

        current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write32(unsigned long field, u32 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u32 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write16(unsigned long field, u16 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u16 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline u64 evmcs_read64(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u64 *)((char *)current_evmcs + offset);
}

static inline u32 evmcs_read32(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u32 *)((char *)current_evmcs + offset);
}

static inline u16 evmcs_read16(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u16 *)((char *)current_evmcs + offset);
}

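/*
 * Mark the MSR bitmap dirty (when the MSR-bitmap enlightenment is in use)
 * so Hyper-V re-reads it on the next VM entry; call this after modifying
 * the MSR bitmap of a loaded eVMCS.
 */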
static inline void evmcs_touch_msr_bitmap(void)
{
        if (unlikely(!current_evmcs))
                return;

        if (current_evmcs->hv_enlightenments_control.msr_bitmap)
                current_evmcs->hv_clean_fields &=
                        ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
}

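/*
 * Load an enlightened VMCS: instead of VMPTRLD, the eVMCS physical address
 * is handed to Hyper-V through this CPU's VP assist page and enlightened
 * VM entry is turned on.
 */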
static inline void evmcs_load(u64 phys_addr)
{
        struct hv_vp_assist_page *vp_ap =
                hv_get_vp_assist_page(smp_processor_id());

        vp_ap->current_nested_vmcs = phys_addr;
        vp_ap->enlighten_vmentry = 1;
}

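/*
 * Defined in vmx/evmcs.c; presumably clears the EVMCS1_UNSUPPORTED_* control
 * bits from the computed vmcs_config so KVM never enables features that
 * eVMCSv1 cannot express.
 */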
void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version);

#endif /* __KVM_X86_VMX_EVMCS_H */