1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
34
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
48
49 #include "standard-headers/asm-x86/kvm_para.h"
50
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
72 };
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
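/*
 * A quick illustration of the lookup above (hand-checked against the table,
 * using cache definitions that appear later in this file): legacy_l1d_cache
 * (level 1 data, 32 KiB, 8-way, 64-byte lines) matches descriptor 0x2C, and
 * legacy_l2_cache_cpuid2 (level 2 unified, 2 MiB, 8-way, 64-byte lines)
 * matches descriptor 0x7D.  A geometry with no entry in the table (e.g. a
 * hypothetical 48 KiB L1) yields CACHE_DESCRIPTOR_UNAVAILABLE instead.
 */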
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
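/*
 * Worked example (hand-computed, assuming a hypothetical topology with
 * num_cores = 1 and num_apic_ids = 1): encoding legacy_l1d_cache (defined
 * below: L1 data, 32 KiB, 8-way, 64-byte lines, 64 sets, self-initializing,
 * no_invd_sharing) gives
 *   EAX = 0x00000121   type=data(1), level=1, self-init
 *   EBX = 0x01C0003F   (line_size-1) | (partitions-1)<<12 | (assoc-1)<<22
 *   ECX = 0x0000003F   sets - 1
 *   EDX = 0x00000001   CACHE_NO_INVD_SHARING
 */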
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
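/*
 * Example (hand-computed): for legacy_l1d_cache_amd below (64 KiB, 2-way,
 * 1 line per tag, 64-byte lines) this returns
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140
 */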
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
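/*
 * Example (hand-computed): for legacy_l2_cache_amd below (512 KiB, 16-way
 * so AMD_ENC_ASSOC(16) = 0x8, 1 line per tag, 64-byte lines) and l3 == NULL:
 *   *ecx = (512 << 16) | (0x8 << 12) | (1 << 8) | 64 = 0x02008140
 *   *edx = 0
 */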
340
341 /*
342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
344 * Define the constants used to build the CPU topology. Right now, the
345 * TOPOEXT feature is enabled only on EPYC, so these constants are based on
346 * EPYC-supported configurations. We may need to handle other cases if
347 * these values change in the future.
348 */
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
357
358 /*
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
361 */
362 static int nodes_in_socket(int nr_cores)
363 {
364 int nodes;
365
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
367
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
370 }
371
372 /*
373 * Decide the number of cores in a core complex for the given nr_cores, using
374 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET, and keep the layout as symmetric as possible.
376 * The L3 cache is shared across all cores in a core complex, so this also
377 * tells us how many cores share the L3 cache.
378 */
379 static int cores_in_core_complex(int nr_cores)
380 {
381 int nodes;
382
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
386 }
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
389
390 /*
391 * Divide the cores across all the core complexes and
392 * return the rounded-up value.
393 */
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
395 }
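/*
 * Example values for the two helpers above (hand-computed, hypothetical
 * core counts): nr_cores = 4 fits in a single core complex, so
 * cores_in_core_complex() returns 4.  nr_cores = 12 needs
 * nodes_in_socket(12) = 2 nodes, giving DIV_ROUND_UP(12, 2 * MAX_CCX) = 3
 * cores per complex.  nr_cores = 24 computes 3 nodes, which is rounded up
 * to 4, giving DIV_ROUND_UP(24, 4 * MAX_CCX) = 3 cores per complex.
 */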
396
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
401 {
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
405
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
408
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
415 }
416
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
425
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
428
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
432 }
433
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
438 /*
439 * Adjusted core index for this core in the topology
440 * This can be 0,1,2,3 with max 4 cores in a core complex
441 */
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
447 };
448
449 /*
450 * Build a configuration that closely matches the EPYC hardware, using the
451 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
452 * MAX_CORES_IN_NODE) for now. This could change in the future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
456 */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
459 {
460 int nodes, cores_in_ccx;
461
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
464
465 cores_in_ccx = cores_in_core_complex(nr_cores);
466
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
471 }
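/*
 * Example (hand-computed, hypothetical 16-core socket): for nr_cores = 16
 * and core_id = 10, nodes_in_socket() = 2 and cores_in_core_complex() = 4,
 * so the result is node_id = 10 / 8 = 1, ccx_id = (10 % 8) / 4 = 0,
 * core_id = 10 % 4 = 2, num_nodes = 2.
 */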
472
473 /* Encode topology info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
477 {
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
481
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
484 /*
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
498 */
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
504 }
505 /*
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
512 */
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
517 /*
518 * Node id fixup. Actual hardware supports up to 4 nodes, but with
519 * more than 32 cores we may end up with more than 4 nodes.
520 * The node id is a combination of socket id and node id; the only
521 * requirement here is that this number is unique across the system.
522 * Shift the socket id to accommodate more nodes. We don't expect both
523 * the socket id and the node id to be large numbers at the same time;
524 * this is not an ideal config, but we need to support it. The maximum
525 * number of nodes we can have is 32 (255/8), with 8 cores per node and
526 * 255 cores at most, so we only need 5 bits for nodes. Find the leftmost
527 * set bit to represent the total number of nodes (find_last_bit returns
528 * the last set bit, 0-based) and left-shift(+1) the socket id to cover all nodes.
529 */
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
534 }
535 *edx = 0;
536 }
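/*
 * Example (hand-computed, continuing the hypothetical 16-core case above
 * with SMT, i.e. nr_threads = 2, and socket_id = 0, node_id = 1, ccx_id = 0,
 * core_id = 2, num_nodes = 2):
 *   *ebx = ((2 - 1) << 8) | (1 << 3) | (0 << 2) | 2 = 0x10A
 *   *ecx = ((2 - 1) << 8) | (0 << 2) | 1           = 0x101
 * EAX carries the APIC ID unchanged and EDX is always 0.
 */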
537
538 /*
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
542 */
543
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
555 };
556
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
569 };
570
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
582 };
583
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
596 };
597
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
609 };
610
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
618 };
619
620
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
631 };
632
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
646 };
647
648 /* TLB definitions: */
649
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
654
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
659
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
664
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
669
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
672 /*
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
679 */
680 #define INTEL_PT_MINIMAL_EBX 0xf
681 /*
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
689 */
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
701 {
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
707 }
708 dst[CPUID_VENDOR_SZ] = '\0';
709 }
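/*
 * Example: the CPUID.0 vendor registers of an Intel CPU, EBX = 0x756e6547,
 * EDX = 0x49656e69, ECX = 0x6c65746e, decode (low byte first, in the
 * EBX/EDX/ECX order used by host_vendor_fms() below) to "GenuineIntel".
 */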
710
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
722
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
744
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
750
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_7_1_EAX_FEATURES 0
774 #define TCG_APM_FEATURES 0
775 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
776 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
777 /* missing:
778 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
779
780 typedef enum FeatureWordType {
781 CPUID_FEATURE_WORD,
782 MSR_FEATURE_WORD,
783 } FeatureWordType;
784
785 typedef struct FeatureWordInfo {
786 FeatureWordType type;
787 /* feature flag names are taken from "Intel Processor Identification and
788 * the CPUID Instruction" and AMD's "CPUID Specification".
789 * In cases of disagreement between feature naming conventions,
790 * aliases may be added.
791 */
792 const char *feat_names[64];
793 union {
794 /* If type==CPUID_FEATURE_WORD */
795 struct {
796 uint32_t eax; /* Input EAX for CPUID */
797 bool needs_ecx; /* CPUID instruction uses ECX as input */
798 uint32_t ecx; /* Input ECX value for CPUID */
799 int reg; /* output register (R_* constant) */
800 } cpuid;
801 /* If type==MSR_FEATURE_WORD */
802 struct {
803 uint32_t index;
804 } msr;
805 };
806 uint64_t tcg_features; /* Feature flags supported by TCG */
807 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
808 uint64_t migratable_flags; /* Feature flags known to be migratable */
809 /* Features that shouldn't be auto-enabled by "-cpu host" */
810 uint64_t no_autoenable_flags;
811 } FeatureWordInfo;
812
813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
814 [FEAT_1_EDX] = {
815 .type = CPUID_FEATURE_WORD,
816 .feat_names = {
817 "fpu", "vme", "de", "pse",
818 "tsc", "msr", "pae", "mce",
819 "cx8", "apic", NULL, "sep",
820 "mtrr", "pge", "mca", "cmov",
821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
822 NULL, "ds" /* Intel dts */, "acpi", "mmx",
823 "fxsr", "sse", "sse2", "ss",
824 "ht" /* Intel htt */, "tm", "ia64", "pbe",
825 },
826 .cpuid = {.eax = 1, .reg = R_EDX, },
827 .tcg_features = TCG_FEATURES,
828 },
829 [FEAT_1_ECX] = {
830 .type = CPUID_FEATURE_WORD,
831 .feat_names = {
832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
833 "ds-cpl", "vmx", "smx", "est",
834 "tm2", "ssse3", "cid", NULL,
835 "fma", "cx16", "xtpr", "pdcm",
836 NULL, "pcid", "dca", "sse4.1",
837 "sse4.2", "x2apic", "movbe", "popcnt",
838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
839 "avx", "f16c", "rdrand", "hypervisor",
840 },
841 .cpuid = { .eax = 1, .reg = R_ECX, },
842 .tcg_features = TCG_EXT_FEATURES,
843 },
844 /* Feature names that are already defined in feature_name[] but
845 * are also set in CPUID[8000_0001].EDX on AMD CPUs don't have their
846 * names listed in feat_names below. They are copied automatically
847 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
848 */
849 [FEAT_8000_0001_EDX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
857 "nx", NULL, "mmxext", NULL /* mmx */,
858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
859 NULL, "lm", "3dnowext", "3dnow",
860 },
861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
862 .tcg_features = TCG_EXT2_FEATURES,
863 },
864 [FEAT_8000_0001_ECX] = {
865 .type = CPUID_FEATURE_WORD,
866 .feat_names = {
867 "lahf-lm", "cmp-legacy", "svm", "extapic",
868 "cr8legacy", "abm", "sse4a", "misalignsse",
869 "3dnowprefetch", "osvw", "ibs", "xop",
870 "skinit", "wdt", NULL, "lwp",
871 "fma4", "tce", NULL, "nodeid-msr",
872 NULL, "tbm", "topoext", "perfctr-core",
873 "perfctr-nb", NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
875 },
876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
877 .tcg_features = TCG_EXT3_FEATURES,
878 /*
879 * TOPOEXT is always allowed but can't be enabled blindly by
880 * "-cpu host", as it requires consistent cache topology info
881 * to be provided so it doesn't confuse guests.
882 */
883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
884 },
885 [FEAT_C000_0001_EDX] = {
886 .type = CPUID_FEATURE_WORD,
887 .feat_names = {
888 NULL, NULL, "xstore", "xstore-en",
889 NULL, NULL, "xcrypt", "xcrypt-en",
890 "ace2", "ace2-en", "phe", "phe-en",
891 "pmm", "pmm-en", NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 },
897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
898 .tcg_features = TCG_EXT4_FEATURES,
899 },
900 [FEAT_KVM] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
906 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 NULL, NULL, NULL, NULL,
909 "kvmclock-stable-bit", NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 },
912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
913 .tcg_features = TCG_KVM_FEATURES,
914 },
915 [FEAT_KVM_HINTS] = {
916 .type = CPUID_FEATURE_WORD,
917 .feat_names = {
918 "kvm-hint-dedicated", NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 },
927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
928 .tcg_features = TCG_KVM_FEATURES,
929 /*
930 * KVM hints aren't auto-enabled by -cpu host; they need to be
931 * enabled explicitly on the command line.
932 */
933 .no_autoenable_flags = ~0U,
934 },
935 /*
936 * .feat_names are commented out for Hyper-V enlightenments because we
937 * don't want to have two different ways of enabling them on the QEMU
938 * command line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...)
939 * require enabling several feature bits simultaneously; exposing these
940 * bits individually may just confuse guests.
941 */
942 [FEAT_HYPERV_EAX] = {
943 .type = CPUID_FEATURE_WORD,
944 .feat_names = {
945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
952 NULL, NULL,
953 NULL, NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 },
958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
959 },
960 [FEAT_HYPERV_EBX] = {
961 .type = CPUID_FEATURE_WORD,
962 .feat_names = {
963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
965 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
966 NULL /* hv_create_port */, NULL /* hv_connect_port */,
967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
969 NULL, NULL,
970 NULL, NULL, NULL, NULL,
971 NULL, NULL, NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 },
975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
976 },
977 [FEAT_HYPERV_EDX] = {
978 .type = CPUID_FEATURE_WORD,
979 .feat_names = {
980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
983 NULL, NULL,
984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
985 NULL, NULL, NULL, NULL,
986 NULL, NULL, NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 },
991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
992 },
993 [FEAT_HV_RECOMM_EAX] = {
994 .type = CPUID_FEATURE_WORD,
995 .feat_names = {
996 NULL /* hv_recommend_pv_as_switch */,
997 NULL /* hv_recommend_pv_tlbflush_local */,
998 NULL /* hv_recommend_pv_tlbflush_remote */,
999 NULL /* hv_recommend_msr_apic_access */,
1000 NULL /* hv_recommend_msr_reset */,
1001 NULL /* hv_recommend_relaxed_timing */,
1002 NULL /* hv_recommend_dma_remapping */,
1003 NULL /* hv_recommend_int_remapping */,
1004 NULL /* hv_recommend_x2apic_msrs */,
1005 NULL /* hv_recommend_autoeoi_deprecation */,
1006 NULL /* hv_recommend_pv_ipi */,
1007 NULL /* hv_recommend_ex_hypercalls */,
1008 NULL /* hv_hypervisor_is_nested */,
1009 NULL /* hv_recommend_int_mbec */,
1010 NULL /* hv_recommend_evmcs */,
1011 NULL,
1012 NULL, NULL, NULL, NULL,
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1016 },
1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1018 },
1019 [FEAT_HV_NESTED_EAX] = {
1020 .type = CPUID_FEATURE_WORD,
1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1022 },
1023 [FEAT_SVM] = {
1024 .type = CPUID_FEATURE_WORD,
1025 .feat_names = {
1026 "npt", "lbrv", "svm-lock", "nrip-save",
1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1028 NULL, NULL, "pause-filter", NULL,
1029 "pfthreshold", NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 },
1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1036 .tcg_features = TCG_SVM_FEATURES,
1037 },
1038 [FEAT_7_0_EBX] = {
1039 .type = CPUID_FEATURE_WORD,
1040 .feat_names = {
1041 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1042 "hle", "avx2", NULL, "smep",
1043 "bmi2", "erms", "invpcid", "rtm",
1044 NULL, NULL, "mpx", NULL,
1045 "avx512f", "avx512dq", "rdseed", "adx",
1046 "smap", "avx512ifma", "pcommit", "clflushopt",
1047 "clwb", "intel-pt", "avx512pf", "avx512er",
1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1049 },
1050 .cpuid = {
1051 .eax = 7,
1052 .needs_ecx = true, .ecx = 0,
1053 .reg = R_EBX,
1054 },
1055 .tcg_features = TCG_7_0_EBX_FEATURES,
1056 },
1057 [FEAT_7_0_ECX] = {
1058 .type = CPUID_FEATURE_WORD,
1059 .feat_names = {
1060 NULL, "avx512vbmi", "umip", "pku",
1061 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1064 "la57", NULL, NULL, NULL,
1065 NULL, NULL, "rdpid", NULL,
1066 NULL, "cldemote", NULL, "movdiri",
1067 "movdir64b", NULL, NULL, NULL,
1068 },
1069 .cpuid = {
1070 .eax = 7,
1071 .needs_ecx = true, .ecx = 0,
1072 .reg = R_ECX,
1073 },
1074 .tcg_features = TCG_7_0_ECX_FEATURES,
1075 },
1076 [FEAT_7_0_EDX] = {
1077 .type = CPUID_FEATURE_WORD,
1078 .feat_names = {
1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, "md-clear", NULL,
1082 NULL, NULL, NULL, NULL,
1083 NULL, NULL, NULL /* pconfig */, NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, "spec-ctrl", "stibp",
1086 NULL, "arch-capabilities", "core-capability", "ssbd",
1087 },
1088 .cpuid = {
1089 .eax = 7,
1090 .needs_ecx = true, .ecx = 0,
1091 .reg = R_EDX,
1092 },
1093 .tcg_features = TCG_7_0_EDX_FEATURES,
1094 },
1095 [FEAT_7_1_EAX] = {
1096 .type = CPUID_FEATURE_WORD,
1097 .feat_names = {
1098 NULL, NULL, NULL, NULL,
1099 NULL, "avx512-bf16", NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 },
1107 .cpuid = {
1108 .eax = 7,
1109 .needs_ecx = true, .ecx = 1,
1110 .reg = R_EAX,
1111 },
1112 .tcg_features = TCG_7_1_EAX_FEATURES,
1113 },
1114 [FEAT_8000_0007_EDX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 "invtsc", NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1125 },
1126 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1127 .tcg_features = TCG_APM_FEATURES,
1128 .unmigratable_flags = CPUID_APM_INVTSC,
1129 },
1130 [FEAT_8000_0008_EBX] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "clzero", NULL, "xsaveerptr", NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, "wbnoinvd", NULL, NULL,
1136 "ibpb", NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1140 NULL, NULL, NULL, NULL,
1141 },
1142 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1143 .tcg_features = 0,
1144 .unmigratable_flags = 0,
1145 },
1146 [FEAT_XSAVE] = {
1147 .type = CPUID_FEATURE_WORD,
1148 .feat_names = {
1149 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 },
1158 .cpuid = {
1159 .eax = 0xd,
1160 .needs_ecx = true, .ecx = 1,
1161 .reg = R_EAX,
1162 },
1163 .tcg_features = TCG_XSAVE_FEATURES,
1164 },
1165 [FEAT_6_EAX] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .feat_names = {
1168 NULL, NULL, "arat", NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 NULL, NULL, NULL, NULL,
1172 NULL, NULL, NULL, NULL,
1173 NULL, NULL, NULL, NULL,
1174 NULL, NULL, NULL, NULL,
1175 NULL, NULL, NULL, NULL,
1176 },
1177 .cpuid = { .eax = 6, .reg = R_EAX, },
1178 .tcg_features = TCG_6_EAX_FEATURES,
1179 },
1180 [FEAT_XSAVE_COMP_LO] = {
1181 .type = CPUID_FEATURE_WORD,
1182 .cpuid = {
1183 .eax = 0xD,
1184 .needs_ecx = true, .ecx = 0,
1185 .reg = R_EAX,
1186 },
1187 .tcg_features = ~0U,
1188 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1189 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1190 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1191 XSTATE_PKRU_MASK,
1192 },
1193 [FEAT_XSAVE_COMP_HI] = {
1194 .type = CPUID_FEATURE_WORD,
1195 .cpuid = {
1196 .eax = 0xD,
1197 .needs_ecx = true, .ecx = 0,
1198 .reg = R_EDX,
1199 },
1200 .tcg_features = ~0U,
1201 },
1202 /* Below are MSR-exposed features */
1203 [FEAT_ARCH_CAPABILITIES] = {
1204 .type = MSR_FEATURE_WORD,
1205 .feat_names = {
1206 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1207 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
1208 "taa-no", NULL, NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 NULL, NULL, NULL, NULL,
1211 NULL, NULL, NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 },
1215 .msr = {
1216 .index = MSR_IA32_ARCH_CAPABILITIES,
1217 },
1218 },
1219 [FEAT_CORE_CAPABILITY] = {
1220 .type = MSR_FEATURE_WORD,
1221 .feat_names = {
1222 NULL, NULL, NULL, NULL,
1223 NULL, "split-lock-detect", NULL, NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 NULL, NULL, NULL, NULL,
1228 NULL, NULL, NULL, NULL,
1229 NULL, NULL, NULL, NULL,
1230 },
1231 .msr = {
1232 .index = MSR_IA32_CORE_CAPABILITY,
1233 },
1234 },
1235
1236 [FEAT_VMX_PROCBASED_CTLS] = {
1237 .type = MSR_FEATURE_WORD,
1238 .feat_names = {
1239 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1240 NULL, NULL, NULL, "vmx-hlt-exit",
1241 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1242 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1243 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1244 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1245 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1246 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1247 },
1248 .msr = {
1249 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1250 }
1251 },
1252
1253 [FEAT_VMX_SECONDARY_CTLS] = {
1254 .type = MSR_FEATURE_WORD,
1255 .feat_names = {
1256 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1257 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1258 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1259 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1260 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1261 "vmx-xsaves", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 NULL, NULL, NULL, NULL,
1264 },
1265 .msr = {
1266 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1267 }
1268 },
1269
1270 [FEAT_VMX_PINBASED_CTLS] = {
1271 .type = MSR_FEATURE_WORD,
1272 .feat_names = {
1273 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1274 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1275 NULL, NULL, NULL, NULL,
1276 NULL, NULL, NULL, NULL,
1277 NULL, NULL, NULL, NULL,
1278 NULL, NULL, NULL, NULL,
1279 NULL, NULL, NULL, NULL,
1280 NULL, NULL, NULL, NULL,
1281 },
1282 .msr = {
1283 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1284 }
1285 },
1286
1287 [FEAT_VMX_EXIT_CTLS] = {
1288 .type = MSR_FEATURE_WORD,
1289 /*
1290 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1291 * the LM CPUID bit.
1292 */
1293 .feat_names = {
1294 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1295 NULL, NULL, NULL, NULL,
1296 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1297 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1298 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1299 "vmx-exit-save-efer", "vmx-exit-load-efer",
1300 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1301 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1302 NULL, NULL, NULL, NULL,
1303 },
1304 .msr = {
1305 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1306 }
1307 },
1308
1309 [FEAT_VMX_ENTRY_CTLS] = {
1310 .type = MSR_FEATURE_WORD,
1311 .feat_names = {
1312 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1313 NULL, NULL, NULL, NULL,
1314 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1315 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1316 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1317 NULL, NULL, NULL, NULL,
1318 NULL, NULL, NULL, NULL,
1319 NULL, NULL, NULL, NULL,
1320 },
1321 .msr = {
1322 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1323 }
1324 },
1325
1326 [FEAT_VMX_MISC] = {
1327 .type = MSR_FEATURE_WORD,
1328 .feat_names = {
1329 NULL, NULL, NULL, NULL,
1330 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1331 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1332 NULL, NULL, NULL, NULL,
1333 NULL, NULL, NULL, NULL,
1334 NULL, NULL, NULL, NULL,
1335 NULL, NULL, NULL, NULL,
1336 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1337 },
1338 .msr = {
1339 .index = MSR_IA32_VMX_MISC,
1340 }
1341 },
1342
1343 [FEAT_VMX_EPT_VPID_CAPS] = {
1344 .type = MSR_FEATURE_WORD,
1345 .feat_names = {
1346 "vmx-ept-execonly", NULL, NULL, NULL,
1347 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1348 NULL, NULL, NULL, NULL,
1349 NULL, NULL, NULL, NULL,
1350 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1351 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1352 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1353 NULL, NULL, NULL, NULL,
1354 "vmx-invvpid", NULL, NULL, NULL,
1355 NULL, NULL, NULL, NULL,
1356 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1357 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1358 NULL, NULL, NULL, NULL,
1359 NULL, NULL, NULL, NULL,
1360 NULL, NULL, NULL, NULL,
1361 NULL, NULL, NULL, NULL,
1362 NULL, NULL, NULL, NULL,
1363 },
1364 .msr = {
1365 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1366 }
1367 },
1368
1369 [FEAT_VMX_BASIC] = {
1370 .type = MSR_FEATURE_WORD,
1371 .feat_names = {
1372 [54] = "vmx-ins-outs",
1373 [55] = "vmx-true-ctls",
1374 },
1375 .msr = {
1376 .index = MSR_IA32_VMX_BASIC,
1377 },
1378 /* Just to be safe - we don't support setting the MSEG version field. */
1379 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1380 },
1381
1382 [FEAT_VMX_VMFUNC] = {
1383 .type = MSR_FEATURE_WORD,
1384 .feat_names = {
1385 [0] = "vmx-eptp-switching",
1386 },
1387 .msr = {
1388 .index = MSR_IA32_VMX_VMFUNC,
1389 }
1390 },
1391
1392 };
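/*
 * Example of how the table above is consumed: feature_word_info[FEAT_1_EDX]
 * describes CPUID[EAX=1].EDX, and its feat_names[25] entry, "sse",
 * corresponds to CPUID_SSE (bit 25); it is also the property name used on
 * the command line, e.g. "-cpu qemu64,sse=off".
 */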
1393
1394 typedef struct FeatureMask {
1395 FeatureWord index;
1396 uint64_t mask;
1397 } FeatureMask;
1398
1399 typedef struct FeatureDep {
1400 FeatureMask from, to;
1401 } FeatureDep;
1402
1403 static FeatureDep feature_dependencies[] = {
1404 {
1405 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1406 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1407 },
1408 {
1409 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1410 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1411 },
1412 {
1413 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1414 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1415 },
1416 {
1417 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1418 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1419 },
1420 {
1421 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1422 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1423 },
1424 {
1425 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1426 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1427 },
1428 {
1429 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1430 .to = { FEAT_VMX_MISC, ~0ull },
1431 },
1432 {
1433 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1434 .to = { FEAT_VMX_BASIC, ~0ull },
1435 },
1436 {
1437 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1438 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1439 },
1440 {
1441 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1442 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1443 },
1444 {
1445 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1446 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1447 },
1448 {
1449 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1450 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1451 },
1452 {
1453 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1454 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1455 },
1456 {
1457 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1458 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1459 },
1460 {
1461 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1462 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1463 },
1464 {
1465 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1466 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1467 },
1468 {
1469 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1470 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1471 },
1472 {
1473 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1474 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1475 },
1476 {
1477 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1478 .to = { FEAT_VMX_VMFUNC, ~0ull },
1479 },
1480 };
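/*
 * The table above expresses "to needs from" relations: for example, the
 * first entry means that if CPUID_7_0_EDX_ARCH_CAPABILITIES is not enabled
 * in FEAT_7_0_EDX, every bit of the FEAT_ARCH_CAPABILITIES word should be
 * cleared as well; likewise, disabling "vmx" in FEAT_1_ECX drops all of the
 * dependent vmx-* control words.
 */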
1481
1482 typedef struct X86RegisterInfo32 {
1483 /* Name of register */
1484 const char *name;
1485 /* QAPI enum value register */
1486 X86CPURegister32 qapi_enum;
1487 } X86RegisterInfo32;
1488
1489 #define REGISTER(reg) \
1490 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1491 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1492 REGISTER(EAX),
1493 REGISTER(ECX),
1494 REGISTER(EDX),
1495 REGISTER(EBX),
1496 REGISTER(ESP),
1497 REGISTER(EBP),
1498 REGISTER(ESI),
1499 REGISTER(EDI),
1500 };
1501 #undef REGISTER
1502
1503 typedef struct ExtSaveArea {
1504 uint32_t feature, bits;
1505 uint32_t offset, size;
1506 } ExtSaveArea;
1507
1508 static const ExtSaveArea x86_ext_save_areas[] = {
1509 [XSTATE_FP_BIT] = {
1510 /* x87 FP state component is always enabled if XSAVE is supported */
1511 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1512 /* x87 state is in the legacy region of the XSAVE area */
1513 .offset = 0,
1514 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1515 },
1516 [XSTATE_SSE_BIT] = {
1517 /* SSE state component is always enabled if XSAVE is supported */
1518 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1519 /* SSE state is in the legacy region of the XSAVE area */
1520 .offset = 0,
1521 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1522 },
1523 [XSTATE_YMM_BIT] =
1524 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1525 .offset = offsetof(X86XSaveArea, avx_state),
1526 .size = sizeof(XSaveAVX) },
1527 [XSTATE_BNDREGS_BIT] =
1528 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1529 .offset = offsetof(X86XSaveArea, bndreg_state),
1530 .size = sizeof(XSaveBNDREG) },
1531 [XSTATE_BNDCSR_BIT] =
1532 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1533 .offset = offsetof(X86XSaveArea, bndcsr_state),
1534 .size = sizeof(XSaveBNDCSR) },
1535 [XSTATE_OPMASK_BIT] =
1536 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1537 .offset = offsetof(X86XSaveArea, opmask_state),
1538 .size = sizeof(XSaveOpmask) },
1539 [XSTATE_ZMM_Hi256_BIT] =
1540 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1541 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1542 .size = sizeof(XSaveZMM_Hi256) },
1543 [XSTATE_Hi16_ZMM_BIT] =
1544 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1545 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1546 .size = sizeof(XSaveHi16_ZMM) },
1547 [XSTATE_PKRU_BIT] =
1548 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1549 .offset = offsetof(X86XSaveArea, pkru_state),
1550 .size = sizeof(XSavePKRU) },
1551 };
1552
1553 static uint32_t xsave_area_size(uint64_t mask)
1554 {
1555 int i;
1556 uint64_t ret = 0;
1557
1558 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1559 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1560 if ((mask >> i) & 1) {
1561 ret = MAX(ret, esa->offset + esa->size);
1562 }
1563 }
1564 return ret;
1565 }
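/*
 * Example: with only XSTATE_FP_MASK and XSTATE_SSE_MASK set, both components
 * live in the legacy region at offset 0, so the result is
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader) (512 + 64 = 576 bytes
 * in the architectural layout).  Adding XSTATE_YMM_MASK would extend it to
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX).
 */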
1566
1567 static inline bool accel_uses_host_cpuid(void)
1568 {
1569 return kvm_enabled() || hvf_enabled();
1570 }
1571
1572 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1573 {
1574 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1575 cpu->env.features[FEAT_XSAVE_COMP_LO];
1576 }
1577
1578 const char *get_register_name_32(unsigned int reg)
1579 {
1580 if (reg >= CPU_NB_REGS32) {
1581 return NULL;
1582 }
1583 return x86_reg_info_32[reg].name;
1584 }
1585
1586 /*
1587 * Returns the set of feature flags that are supported and migratable by
1588 * QEMU, for a given FeatureWord.
1589 */
1590 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1591 {
1592 FeatureWordInfo *wi = &feature_word_info[w];
1593 uint64_t r = 0;
1594 int i;
1595
1596 for (i = 0; i < 64; i++) {
1597 uint64_t f = 1ULL << i;
1598
1599 /* If the feature name is known, it is implicitly considered migratable,
1600 * unless it is explicitly set in unmigratable_flags */
1601 if ((wi->migratable_flags & f) ||
1602 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1603 r |= f;
1604 }
1605 }
1606 return r;
1607 }
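/*
 * For example, in FEAT_8000_0007_EDX the "invtsc" bit has a name but is
 * listed in unmigratable_flags, so it is excluded here, while every named
 * bit of FEAT_1_EDX is considered migratable.
 */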
1608
1609 void host_cpuid(uint32_t function, uint32_t count,
1610 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1611 {
1612 uint32_t vec[4];
1613
1614 #ifdef __x86_64__
1615 asm volatile("cpuid"
1616 : "=a"(vec[0]), "=b"(vec[1]),
1617 "=c"(vec[2]), "=d"(vec[3])
1618 : "0"(function), "c"(count) : "cc");
1619 #elif defined(__i386__)
1620 asm volatile("pusha \n\t"
1621 "cpuid \n\t"
1622 "mov %%eax, 0(%2) \n\t"
1623 "mov %%ebx, 4(%2) \n\t"
1624 "mov %%ecx, 8(%2) \n\t"
1625 "mov %%edx, 12(%2) \n\t"
1626 "popa"
1627 : : "a"(function), "c"(count), "S"(vec)
1628 : "memory", "cc");
1629 #else
1630 abort();
1631 #endif
1632
1633 if (eax)
1634 *eax = vec[0];
1635 if (ebx)
1636 *ebx = vec[1];
1637 if (ecx)
1638 *ecx = vec[2];
1639 if (edx)
1640 *edx = vec[3];
1641 }
1642
1643 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1644 {
1645 uint32_t eax, ebx, ecx, edx;
1646
1647 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1648 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1649
1650 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1651 if (family) {
1652 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1653 }
1654 if (model) {
1655 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1656 }
1657 if (stepping) {
1658 *stepping = eax & 0x0F;
1659 }
1660 }
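/*
 * Example (hand-decoded): a host where CPUID.1 returns EAX = 0x000506E3
 * (a Skylake client part) yields family = 0x6 + 0x0 = 6,
 * model = 0xE | 0x50 = 0x5E (94) and stepping = 3.
 */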
1661
1662 /* CPU class name definitions: */
1663
1664 /* Return type name for a given CPU model name
1665 * Caller is responsible for freeing the returned string.
1666 */
1667 static char *x86_cpu_type_name(const char *model_name)
1668 {
1669 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1670 }
1671
1672 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1673 {
1674 g_autofree char *typename = x86_cpu_type_name(cpu_model);
1675 return object_class_by_name(typename);
1676 }
1677
1678 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1679 {
1680 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1681 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1682 return g_strndup(class_name,
1683 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1684 }
1685
1686 typedef struct PropValue {
1687 const char *prop, *value;
1688 } PropValue;
1689
1690 typedef struct X86CPUVersionDefinition {
1691 X86CPUVersion version;
1692 const char *alias;
1693 PropValue *props;
1694 } X86CPUVersionDefinition;
1695
1696 /* Base definition for a CPU model */
1697 typedef struct X86CPUDefinition {
1698 const char *name;
1699 uint32_t level;
1700 uint32_t xlevel;
1701 /* vendor is a zero-terminated, 12-character ASCII string */
1702 char vendor[CPUID_VENDOR_SZ + 1];
1703 int family;
1704 int model;
1705 int stepping;
1706 FeatureWordArray features;
1707 const char *model_id;
1708 CPUCaches *cache_info;
1709 /*
1710 * Definitions for alternative versions of the CPU model.
1711 * The list is terminated by an item with version == 0.
1712 * If NULL, version 1 will be registered automatically.
1713 */
1714 const X86CPUVersionDefinition *versions;
1715 } X86CPUDefinition;
1716
1717 /* Reference to a specific CPU model version */
1718 struct X86CPUModel {
1719 /* Base CPU definition */
1720 X86CPUDefinition *cpudef;
1721 /* CPU model version */
1722 X86CPUVersion version;
1723 /*
1724 * If true, this is an alias CPU model.
1725 * This matters only for "-cpu help" and query-cpu-definitions
1726 */
1727 bool is_alias;
1728 };
1729
1730 /* Get full model name for CPU version */
1731 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1732 X86CPUVersion version)
1733 {
1734 assert(version > 0);
1735 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1736 }
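/*
 * E.g. x86_cpu_versioned_model_name(cpudef, 2) for the "Nehalem"
 * definition below yields "Nehalem-v2".
 */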
1737
1738 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1739 {
1740 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1741 static const X86CPUVersionDefinition default_version_list[] = {
1742 { 1 },
1743 { /* end of list */ }
1744 };
1745
1746 return def->versions ?: default_version_list;
1747 }
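/*
 * Note: "def->versions ?: default_version_list" is the GNU C conditional
 * shorthand for "def->versions ? def->versions : default_version_list".
 */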
1748
1749 static CPUCaches epyc_cache_info = {
1750 .l1d_cache = &(CPUCacheInfo) {
1751 .type = DATA_CACHE,
1752 .level = 1,
1753 .size = 32 * KiB,
1754 .line_size = 64,
1755 .associativity = 8,
1756 .partitions = 1,
1757 .sets = 64,
1758 .lines_per_tag = 1,
1759 .self_init = 1,
1760 .no_invd_sharing = true,
1761 },
1762 .l1i_cache = &(CPUCacheInfo) {
1763 .type = INSTRUCTION_CACHE,
1764 .level = 1,
1765 .size = 64 * KiB,
1766 .line_size = 64,
1767 .associativity = 4,
1768 .partitions = 1,
1769 .sets = 256,
1770 .lines_per_tag = 1,
1771 .self_init = 1,
1772 .no_invd_sharing = true,
1773 },
1774 .l2_cache = &(CPUCacheInfo) {
1775 .type = UNIFIED_CACHE,
1776 .level = 2,
1777 .size = 512 * KiB,
1778 .line_size = 64,
1779 .associativity = 8,
1780 .partitions = 1,
1781 .sets = 1024,
1782 .lines_per_tag = 1,
1783 },
1784 .l3_cache = &(CPUCacheInfo) {
1785 .type = UNIFIED_CACHE,
1786 .level = 3,
1787 .size = 8 * MiB,
1788 .line_size = 64,
1789 .associativity = 16,
1790 .partitions = 1,
1791 .sets = 8192,
1792 .lines_per_tag = 1,
1793 .self_init = true,
1794 .inclusive = true,
1795 .complex_indexing = true,
1796 },
1797 };
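/*
 * The geometry above is internally consistent:
 * size = sets * line_size * associativity * partitions, e.g.
 * L1D: 64 * 64 B * 8 * 1 = 32 KiB, and L3: 8192 * 64 B * 16 * 1 = 8 MiB.
 */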
1798
1799 /* The following VMX features are not supported by KVM and are left out of the
1800 * CPU definitions:
1801 *
1802 * Dual-monitor support (all processors)
1803 * Entry to SMM
1804 * Deactivate dual-monitor treatment
1805 * Number of CR3-target values
1806 * Shutdown activity state
1807 * Wait-for-SIPI activity state
1808 * PAUSE-loop exiting (Westmere and newer)
1809 * EPT-violation #VE (Broadwell and newer)
1810 * Inject event with insn length=0 (Skylake and newer)
1811 * Conceal non-root operation from PT
1812 * Conceal VM exits from PT
1813 * Conceal VM entries from PT
1814 * Enable ENCLS exiting
1815 * Mode-based execute control (XS/XU)
1816 * TSC scaling (Skylake Server and newer)
1817 * GPA translation for PT (IceLake and newer)
1818 * User wait and pause
1819 * ENCLV exiting
1820 * Load IA32_RTIT_CTL
1821 * Clear IA32_RTIT_CTL
1822 * Advanced VM-exit information for EPT violations
1823 * Sub-page write permissions
1824 * PT in VMX operation
1825 */
1826
1827 static X86CPUDefinition builtin_x86_defs[] = {
1828 {
1829 .name = "qemu64",
1830 .level = 0xd,
1831 .vendor = CPUID_VENDOR_AMD,
1832 .family = 6,
1833 .model = 6,
1834 .stepping = 3,
1835 .features[FEAT_1_EDX] =
1836 PPRO_FEATURES |
1837 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1838 CPUID_PSE36,
1839 .features[FEAT_1_ECX] =
1840 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1841 .features[FEAT_8000_0001_EDX] =
1842 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1843 .features[FEAT_8000_0001_ECX] =
1844 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1845 .xlevel = 0x8000000A,
1846 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1847 },
1848 {
1849 .name = "phenom",
1850 .level = 5,
1851 .vendor = CPUID_VENDOR_AMD,
1852 .family = 16,
1853 .model = 2,
1854 .stepping = 3,
1855 /* Missing: CPUID_HT */
1856 .features[FEAT_1_EDX] =
1857 PPRO_FEATURES |
1858 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1859 CPUID_PSE36 | CPUID_VME,
1860 .features[FEAT_1_ECX] =
1861 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1862 CPUID_EXT_POPCNT,
1863 .features[FEAT_8000_0001_EDX] =
1864 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1865 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1866 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1867 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1868 CPUID_EXT3_CR8LEG,
1869 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1870 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1871 .features[FEAT_8000_0001_ECX] =
1872 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1873 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1874 /* Missing: CPUID_SVM_LBRV */
1875 .features[FEAT_SVM] =
1876 CPUID_SVM_NPT,
1877 .xlevel = 0x8000001A,
1878 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1879 },
1880 {
1881 .name = "core2duo",
1882 .level = 10,
1883 .vendor = CPUID_VENDOR_INTEL,
1884 .family = 6,
1885 .model = 15,
1886 .stepping = 11,
1887 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1888 .features[FEAT_1_EDX] =
1889 PPRO_FEATURES |
1890 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1891 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1892 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1893 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1894 .features[FEAT_1_ECX] =
1895 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1896 CPUID_EXT_CX16,
1897 .features[FEAT_8000_0001_EDX] =
1898 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1899 .features[FEAT_8000_0001_ECX] =
1900 CPUID_EXT3_LAHF_LM,
1901 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
1902 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1903 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1904 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1905 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1906 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
1907 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1908 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1909 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1910 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1911 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1912 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1913 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1914 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
1915 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
1916 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
1917 .features[FEAT_VMX_SECONDARY_CTLS] =
1918 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1921 },
1922 {
1923 .name = "kvm64",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 15,
1927 .model = 6,
1928 .stepping = 1,
1929 /* Missing: CPUID_HT */
1930 .features[FEAT_1_EDX] =
1931 PPRO_FEATURES | CPUID_VME |
1932 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1933 CPUID_PSE36,
1934 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1937 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1938 .features[FEAT_8000_0001_EDX] =
1939 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1940 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1941 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1942 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1943 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1944 .features[FEAT_8000_0001_ECX] =
1945 0,
1946 /* VMX features from Cedar Mill/Prescott */
1947 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1948 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1949 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1950 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1951 VMX_PIN_BASED_NMI_EXITING,
1952 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1953 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1954 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1955 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1956 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1957 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1958 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1959 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
1960 .xlevel = 0x80000008,
1961 .model_id = "Common KVM processor"
1962 },
1963 {
1964 .name = "qemu32",
1965 .level = 4,
1966 .vendor = CPUID_VENDOR_INTEL,
1967 .family = 6,
1968 .model = 6,
1969 .stepping = 3,
1970 .features[FEAT_1_EDX] =
1971 PPRO_FEATURES,
1972 .features[FEAT_1_ECX] =
1973 CPUID_EXT_SSE3,
1974 .xlevel = 0x80000004,
1975 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1976 },
1977 {
1978 .name = "kvm32",
1979 .level = 5,
1980 .vendor = CPUID_VENDOR_INTEL,
1981 .family = 15,
1982 .model = 6,
1983 .stepping = 1,
1984 .features[FEAT_1_EDX] =
1985 PPRO_FEATURES | CPUID_VME |
1986 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1987 .features[FEAT_1_ECX] =
1988 CPUID_EXT_SSE3,
1989 .features[FEAT_8000_0001_ECX] =
1990 0,
1991 /* VMX features from Yonah */
1992 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1993 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1994 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1995 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1996 VMX_PIN_BASED_NMI_EXITING,
1997 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1998 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1999 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2000 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2001 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2002 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2003 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2004 .xlevel = 0x80000008,
2005 .model_id = "Common 32-bit KVM processor"
2006 },
2007 {
2008 .name = "coreduo",
2009 .level = 10,
2010 .vendor = CPUID_VENDOR_INTEL,
2011 .family = 6,
2012 .model = 14,
2013 .stepping = 8,
2014 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2015 .features[FEAT_1_EDX] =
2016 PPRO_FEATURES | CPUID_VME |
2017 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
2018 CPUID_SS,
2019 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
2020 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
2021 .features[FEAT_1_ECX] =
2022 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
2023 .features[FEAT_8000_0001_EDX] =
2024 CPUID_EXT2_NX,
2025 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2026 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2027 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2028 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2029 VMX_PIN_BASED_NMI_EXITING,
2030 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2031 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2032 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2033 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2034 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2035 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2036 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2037 .xlevel = 0x80000008,
2038 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
2039 },
2040 {
2041 .name = "486",
2042 .level = 1,
2043 .vendor = CPUID_VENDOR_INTEL,
2044 .family = 4,
2045 .model = 8,
2046 .stepping = 0,
2047 .features[FEAT_1_EDX] =
2048 I486_FEATURES,
2049 .xlevel = 0,
2050 .model_id = "",
2051 },
2052 {
2053 .name = "pentium",
2054 .level = 1,
2055 .vendor = CPUID_VENDOR_INTEL,
2056 .family = 5,
2057 .model = 4,
2058 .stepping = 3,
2059 .features[FEAT_1_EDX] =
2060 PENTIUM_FEATURES,
2061 .xlevel = 0,
2062 .model_id = "",
2063 },
2064 {
2065 .name = "pentium2",
2066 .level = 2,
2067 .vendor = CPUID_VENDOR_INTEL,
2068 .family = 6,
2069 .model = 5,
2070 .stepping = 2,
2071 .features[FEAT_1_EDX] =
2072 PENTIUM2_FEATURES,
2073 .xlevel = 0,
2074 .model_id = "",
2075 },
2076 {
2077 .name = "pentium3",
2078 .level = 3,
2079 .vendor = CPUID_VENDOR_INTEL,
2080 .family = 6,
2081 .model = 7,
2082 .stepping = 3,
2083 .features[FEAT_1_EDX] =
2084 PENTIUM3_FEATURES,
2085 .xlevel = 0,
2086 .model_id = "",
2087 },
2088 {
2089 .name = "athlon",
2090 .level = 2,
2091 .vendor = CPUID_VENDOR_AMD,
2092 .family = 6,
2093 .model = 2,
2094 .stepping = 3,
2095 .features[FEAT_1_EDX] =
2096 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2097 CPUID_MCA,
2098 .features[FEAT_8000_0001_EDX] =
2099 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2100 .xlevel = 0x80000008,
2101 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2102 },
2103 {
2104 .name = "n270",
2105 .level = 10,
2106 .vendor = CPUID_VENDOR_INTEL,
2107 .family = 6,
2108 .model = 28,
2109 .stepping = 2,
2110 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2111 .features[FEAT_1_EDX] =
2112 PPRO_FEATURES |
2113 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2114 CPUID_ACPI | CPUID_SS,
2115 /* Some CPUs lack CPUID_SEP */
2116 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2117 * CPUID_EXT_XTPR */
2118 .features[FEAT_1_ECX] =
2119 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2120 CPUID_EXT_MOVBE,
2121 .features[FEAT_8000_0001_EDX] =
2122 CPUID_EXT2_NX,
2123 .features[FEAT_8000_0001_ECX] =
2124 CPUID_EXT3_LAHF_LM,
2125 .xlevel = 0x80000008,
2126 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2127 },
2128 {
2129 .name = "Conroe",
2130 .level = 10,
2131 .vendor = CPUID_VENDOR_INTEL,
2132 .family = 6,
2133 .model = 15,
2134 .stepping = 3,
2135 .features[FEAT_1_EDX] =
2136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2140 CPUID_DE | CPUID_FP87,
2141 .features[FEAT_1_ECX] =
2142 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2143 .features[FEAT_8000_0001_EDX] =
2144 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2145 .features[FEAT_8000_0001_ECX] =
2146 CPUID_EXT3_LAHF_LM,
2147 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2148 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2149 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2150 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2151 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2152 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2153 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2154 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2155 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2156 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2157 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2158 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2159 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2160 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2161 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2162 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2163 .features[FEAT_VMX_SECONDARY_CTLS] =
2164 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
2165 .xlevel = 0x80000008,
2166 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2167 },
2168 {
2169 .name = "Penryn",
2170 .level = 10,
2171 .vendor = CPUID_VENDOR_INTEL,
2172 .family = 6,
2173 .model = 23,
2174 .stepping = 3,
2175 .features[FEAT_1_EDX] =
2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2180 CPUID_DE | CPUID_FP87,
2181 .features[FEAT_1_ECX] =
2182 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2183 CPUID_EXT_SSE3,
2184 .features[FEAT_8000_0001_EDX] =
2185 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2186 .features[FEAT_8000_0001_ECX] =
2187 CPUID_EXT3_LAHF_LM,
2188 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2189 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2190 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2191 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
2192 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2193 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2194 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2195 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2196 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2197 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2198 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2199 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2200 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2201 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2202 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2203 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2204 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2205 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2206 .features[FEAT_VMX_SECONDARY_CTLS] =
2207 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2208 VMX_SECONDARY_EXEC_WBINVD_EXITING,
2209 .xlevel = 0x80000008,
2210 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2211 },
2212 {
2213 .name = "Nehalem",
2214 .level = 11,
2215 .vendor = CPUID_VENDOR_INTEL,
2216 .family = 6,
2217 .model = 26,
2218 .stepping = 3,
2219 .features[FEAT_1_EDX] =
2220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2224 CPUID_DE | CPUID_FP87,
2225 .features[FEAT_1_ECX] =
2226 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2227 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2228 .features[FEAT_8000_0001_EDX] =
2229 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2230 .features[FEAT_8000_0001_ECX] =
2231 CPUID_EXT3_LAHF_LM,
2232 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2233 MSR_VMX_BASIC_TRUE_CTLS,
2234 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2235 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2236 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2237 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2238 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2239 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2240 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2241 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2242 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2243 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2244 .features[FEAT_VMX_EXIT_CTLS] =
2245 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2246 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2247 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2248 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2249 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2250 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2251 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2252 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2253 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2254 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2255 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2256 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2257 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2258 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2259 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2260 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2261 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2262 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2263 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2264 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2265 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2266 .features[FEAT_VMX_SECONDARY_CTLS] =
2267 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2268 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2269 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2270 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2271 VMX_SECONDARY_EXEC_ENABLE_VPID,
2272 .xlevel = 0x80000008,
2273 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2274 .versions = (X86CPUVersionDefinition[]) {
2275 { .version = 1 },
2276 {
2277 .version = 2,
2278 .alias = "Nehalem-IBRS",
2279 .props = (PropValue[]) {
2280 { "spec-ctrl", "on" },
2281 { "model-id",
2282 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2283 { /* end of list */ }
2284 }
2285 },
2286 { /* end of list */ }
2287 }
2288 },
2289 {
2290 .name = "Westmere",
2291 .level = 11,
2292 .vendor = CPUID_VENDOR_INTEL,
2293 .family = 6,
2294 .model = 44,
2295 .stepping = 1,
2296 .features[FEAT_1_EDX] =
2297 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2298 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2299 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2300 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2301 CPUID_DE | CPUID_FP87,
2302 .features[FEAT_1_ECX] =
2303 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2304 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2305 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2306 .features[FEAT_8000_0001_EDX] =
2307 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2308 .features[FEAT_8000_0001_ECX] =
2309 CPUID_EXT3_LAHF_LM,
2310 .features[FEAT_6_EAX] =
2311 CPUID_6_EAX_ARAT,
2312 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2313 MSR_VMX_BASIC_TRUE_CTLS,
2314 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2315 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2316 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2317 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2318 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2319 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2320 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2321 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2322 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2323 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2324 .features[FEAT_VMX_EXIT_CTLS] =
2325 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2326 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2327 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2328 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2329 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2330 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2331 MSR_VMX_MISC_STORE_LMA,
2332 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2333 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2334 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2335 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2336 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2337 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2338 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2339 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2340 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2341 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2342 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2343 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2344 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2345 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2346 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2347 .features[FEAT_VMX_SECONDARY_CTLS] =
2348 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2349 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2350 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2351 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2352 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2353 .xlevel = 0x80000008,
2354 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2355 .versions = (X86CPUVersionDefinition[]) {
2356 { .version = 1 },
2357 {
2358 .version = 2,
2359 .alias = "Westmere-IBRS",
2360 .props = (PropValue[]) {
2361 { "spec-ctrl", "on" },
2362 { "model-id",
2363 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2364 { /* end of list */ }
2365 }
2366 },
2367 { /* end of list */ }
2368 }
2369 },
2370 {
2371 .name = "SandyBridge",
2372 .level = 0xd,
2373 .vendor = CPUID_VENDOR_INTEL,
2374 .family = 6,
2375 .model = 42,
2376 .stepping = 1,
2377 .features[FEAT_1_EDX] =
2378 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2379 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2380 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2381 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2382 CPUID_DE | CPUID_FP87,
2383 .features[FEAT_1_ECX] =
2384 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2385 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2386 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2387 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2388 CPUID_EXT_SSE3,
2389 .features[FEAT_8000_0001_EDX] =
2390 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2391 CPUID_EXT2_SYSCALL,
2392 .features[FEAT_8000_0001_ECX] =
2393 CPUID_EXT3_LAHF_LM,
2394 .features[FEAT_XSAVE] =
2395 CPUID_XSAVE_XSAVEOPT,
2396 .features[FEAT_6_EAX] =
2397 CPUID_6_EAX_ARAT,
2398 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2399 MSR_VMX_BASIC_TRUE_CTLS,
2400 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2401 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2402 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2403 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2404 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2405 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2406 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2407 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2408 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2409 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2410 .features[FEAT_VMX_EXIT_CTLS] =
2411 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2412 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2413 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2414 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2415 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2416 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2417 MSR_VMX_MISC_STORE_LMA,
2418 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2419 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2420 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2421 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2422 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2423 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2424 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2425 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2426 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2427 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2428 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2429 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2430 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2431 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2432 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2433 .features[FEAT_VMX_SECONDARY_CTLS] =
2434 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2435 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2436 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2437 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2438 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2439 .xlevel = 0x80000008,
2440 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2441 .versions = (X86CPUVersionDefinition[]) {
2442 { .version = 1 },
2443 {
2444 .version = 2,
2445 .alias = "SandyBridge-IBRS",
2446 .props = (PropValue[]) {
2447 { "spec-ctrl", "on" },
2448 { "model-id",
2449 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2450 { /* end of list */ }
2451 }
2452 },
2453 { /* end of list */ }
2454 }
2455 },
2456 {
2457 .name = "IvyBridge",
2458 .level = 0xd,
2459 .vendor = CPUID_VENDOR_INTEL,
2460 .family = 6,
2461 .model = 58,
2462 .stepping = 9,
2463 .features[FEAT_1_EDX] =
2464 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2465 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2466 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2467 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2468 CPUID_DE | CPUID_FP87,
2469 .features[FEAT_1_ECX] =
2470 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2471 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2472 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2473 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2474 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2475 .features[FEAT_7_0_EBX] =
2476 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2477 CPUID_7_0_EBX_ERMS,
2478 .features[FEAT_8000_0001_EDX] =
2479 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2480 CPUID_EXT2_SYSCALL,
2481 .features[FEAT_8000_0001_ECX] =
2482 CPUID_EXT3_LAHF_LM,
2483 .features[FEAT_XSAVE] =
2484 CPUID_XSAVE_XSAVEOPT,
2485 .features[FEAT_6_EAX] =
2486 CPUID_6_EAX_ARAT,
2487 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2488 MSR_VMX_BASIC_TRUE_CTLS,
2489 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2490 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2491 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2492 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2493 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2494 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2495 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2496 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2497 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2498 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2499 .features[FEAT_VMX_EXIT_CTLS] =
2500 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2501 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2502 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2503 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2504 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2505 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2506 MSR_VMX_MISC_STORE_LMA,
2507 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2508 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2509 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2510 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2511 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2512 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2513 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2514 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2515 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2516 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2517 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2518 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2519 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2520 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2521 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2522 .features[FEAT_VMX_SECONDARY_CTLS] =
2523 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2524 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2525 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2526 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2527 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2528 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2529 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2530 VMX_SECONDARY_EXEC_RDRAND_EXITING,
2531 .xlevel = 0x80000008,
2532 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2533 .versions = (X86CPUVersionDefinition[]) {
2534 { .version = 1 },
2535 {
2536 .version = 2,
2537 .alias = "IvyBridge-IBRS",
2538 .props = (PropValue[]) {
2539 { "spec-ctrl", "on" },
2540 { "model-id",
2541 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2542 { /* end of list */ }
2543 }
2544 },
2545 { /* end of list */ }
2546 }
2547 },
2548 {
2549 .name = "Haswell",
2550 .level = 0xd,
2551 .vendor = CPUID_VENDOR_INTEL,
2552 .family = 6,
2553 .model = 60,
2554 .stepping = 4,
2555 .features[FEAT_1_EDX] =
2556 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2557 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2558 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2559 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2560 CPUID_DE | CPUID_FP87,
2561 .features[FEAT_1_ECX] =
2562 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2563 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2564 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2565 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2566 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2567 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2568 .features[FEAT_8000_0001_EDX] =
2569 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2570 CPUID_EXT2_SYSCALL,
2571 .features[FEAT_8000_0001_ECX] =
2572 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2573 .features[FEAT_7_0_EBX] =
2574 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2575 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2576 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2577 CPUID_7_0_EBX_RTM,
2578 .features[FEAT_XSAVE] =
2579 CPUID_XSAVE_XSAVEOPT,
2580 .features[FEAT_6_EAX] =
2581 CPUID_6_EAX_ARAT,
2582 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2583 MSR_VMX_BASIC_TRUE_CTLS,
2584 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2585 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2586 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2587 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2588 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2589 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2590 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2591 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2592 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2593 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2594 .features[FEAT_VMX_EXIT_CTLS] =
2595 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2596 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2597 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2598 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2599 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2600 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2601 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2602 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2603 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2604 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2605 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2606 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2607 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2608 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2609 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2610 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2611 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2612 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2613 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2614 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2615 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2616 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2617 .features[FEAT_VMX_SECONDARY_CTLS] =
2618 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2619 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2620 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2621 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2622 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2623 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2624 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2625 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2626 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
2627 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2628 .xlevel = 0x80000008,
2629 .model_id = "Intel Core Processor (Haswell)",
2630 .versions = (X86CPUVersionDefinition[]) {
2631 { .version = 1 },
2632 {
2633 .version = 2,
2634 .alias = "Haswell-noTSX",
2635 .props = (PropValue[]) {
2636 { "hle", "off" },
2637 { "rtm", "off" },
2638 { "stepping", "1" },
2639 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2640 { /* end of list */ }
2641 },
2642 },
2643 {
2644 .version = 3,
2645 .alias = "Haswell-IBRS",
2646 .props = (PropValue[]) {
2647 /* Restore TSX features removed by -v2 above */
2648 { "hle", "on" },
2649 { "rtm", "on" },
2650 /*
2651 * Haswell and Haswell-IBRS had stepping=4 in
2652 * QEMU 4.0 and older
2653 */
2654 { "stepping", "4" },
2655 { "spec-ctrl", "on" },
2656 { "model-id",
2657 "Intel Core Processor (Haswell, IBRS)" },
2658 { /* end of list */ }
2659 }
2660 },
2661 {
2662 .version = 4,
2663 .alias = "Haswell-noTSX-IBRS",
2664 .props = (PropValue[]) {
2665 { "hle", "off" },
2666 { "rtm", "off" },
2667 /* spec-ctrl was already enabled by -v3 above */
2668 { "stepping", "1" },
2669 { "model-id",
2670 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2671 { /* end of list */ }
2672 }
2673 },
2674 { /* end of list */ }
2675 }
2676 },
2677 {
2678 .name = "Broadwell",
2679 .level = 0xd,
2680 .vendor = CPUID_VENDOR_INTEL,
2681 .family = 6,
2682 .model = 61,
2683 .stepping = 2,
2684 .features[FEAT_1_EDX] =
2685 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2686 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2687 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2688 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2689 CPUID_DE | CPUID_FP87,
2690 .features[FEAT_1_ECX] =
2691 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2692 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2693 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2694 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2695 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2696 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2697 .features[FEAT_8000_0001_EDX] =
2698 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2699 CPUID_EXT2_SYSCALL,
2700 .features[FEAT_8000_0001_ECX] =
2701 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2702 .features[FEAT_7_0_EBX] =
2703 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2704 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2705 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2706 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2707 CPUID_7_0_EBX_SMAP,
2708 .features[FEAT_XSAVE] =
2709 CPUID_XSAVE_XSAVEOPT,
2710 .features[FEAT_6_EAX] =
2711 CPUID_6_EAX_ARAT,
2712 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2713 MSR_VMX_BASIC_TRUE_CTLS,
2714 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2715 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2716 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2717 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2718 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2719 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2720 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2721 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2722 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2723 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2724 .features[FEAT_VMX_EXIT_CTLS] =
2725 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2726 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2727 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2728 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2729 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2730 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2731 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2732 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2733 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2734 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2735 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2736 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2737 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2738 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2739 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2740 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2741 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2742 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2743 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2744 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2745 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2746 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2747 .features[FEAT_VMX_SECONDARY_CTLS] =
2748 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2749 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2750 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2751 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2752 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2753 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2754 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2755 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2756 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2757 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2758 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2759 .xlevel = 0x80000008,
2760 .model_id = "Intel Core Processor (Broadwell)",
2761 .versions = (X86CPUVersionDefinition[]) {
2762 { .version = 1 },
2763 {
2764 .version = 2,
2765 .alias = "Broadwell-noTSX",
2766 .props = (PropValue[]) {
2767 { "hle", "off" },
2768 { "rtm", "off" },
2769 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2770 { /* end of list */ }
2771 },
2772 },
2773 {
2774 .version = 3,
2775 .alias = "Broadwell-IBRS",
2776 .props = (PropValue[]) {
2777 /* Restore TSX features removed by -v2 above */
2778 { "hle", "on" },
2779 { "rtm", "on" },
2780 { "spec-ctrl", "on" },
2781 { "model-id",
2782 "Intel Core Processor (Broadwell, IBRS)" },
2783 { /* end of list */ }
2784 }
2785 },
2786 {
2787 .version = 4,
2788 .alias = "Broadwell-noTSX-IBRS",
2789 .props = (PropValue[]) {
2790 { "hle", "off" },
2791 { "rtm", "off" },
2792 /* spec-ctrl was already enabled by -v3 above */
2793 { "model-id",
2794 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2795 { /* end of list */ }
2796 }
2797 },
2798 { /* end of list */ }
2799 }
2800 },
2801 {
2802 .name = "Skylake-Client",
2803 .level = 0xd,
2804 .vendor = CPUID_VENDOR_INTEL,
2805 .family = 6,
2806 .model = 94,
2807 .stepping = 3,
2808 .features[FEAT_1_EDX] =
2809 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2810 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2811 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2812 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2813 CPUID_DE | CPUID_FP87,
2814 .features[FEAT_1_ECX] =
2815 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2816 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2817 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2818 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2819 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2820 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2821 .features[FEAT_8000_0001_EDX] =
2822 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2823 CPUID_EXT2_SYSCALL,
2824 .features[FEAT_8000_0001_ECX] =
2825 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2826 .features[FEAT_7_0_EBX] =
2827 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2828 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2829 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2830 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2831 CPUID_7_0_EBX_SMAP,
2832 /* Missing: XSAVES (not supported by some Linux versions,
2833 * including v4.1 to v4.12).
2834 * KVM doesn't yet expose any XSAVES state save component,
2835 * and the only one defined in Skylake (processor tracing)
2836 * probably will block migration anyway.
2837 */
2838 .features[FEAT_XSAVE] =
2839 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2840 CPUID_XSAVE_XGETBV1,
2841 .features[FEAT_6_EAX] =
2842 CPUID_6_EAX_ARAT,
2843 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2844 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2845 MSR_VMX_BASIC_TRUE_CTLS,
2846 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2847 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2848 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2849 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2850 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2851 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2852 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2853 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2854 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2855 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2856 .features[FEAT_VMX_EXIT_CTLS] =
2857 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2858 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2859 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2860 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2861 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2862 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2863 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2864 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2865 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2866 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2867 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2868 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2869 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2870 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2871 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2872 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2873 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2874 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2875 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2876 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2877 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2878 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2879 .features[FEAT_VMX_SECONDARY_CTLS] =
2880 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2881 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2882 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2883 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2884 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2885 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2886 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2887 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2888 .xlevel = 0x80000008,
2889 .model_id = "Intel Core Processor (Skylake)",
2890 .versions = (X86CPUVersionDefinition[]) {
2891 { .version = 1 },
2892 {
2893 .version = 2,
2894 .alias = "Skylake-Client-IBRS",
2895 .props = (PropValue[]) {
2896 { "spec-ctrl", "on" },
2897 { "model-id",
2898 "Intel Core Processor (Skylake, IBRS)" },
2899 { /* end of list */ }
2900 }
2901 },
2902 {
2903 .version = 3,
2904 .alias = "Skylake-Client-noTSX-IBRS",
2905 .props = (PropValue[]) {
2906 { "hle", "off" },
2907 { "rtm", "off" },
2908 { /* end of list */ }
2909 }
2910 },
2911 { /* end of list */ }
2912 }
2913 },
2914 {
2915 .name = "Skylake-Server",
2916 .level = 0xd,
2917 .vendor = CPUID_VENDOR_INTEL,
2918 .family = 6,
2919 .model = 85,
2920 .stepping = 4,
2921 .features[FEAT_1_EDX] =
2922 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2923 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2924 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2925 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2926 CPUID_DE | CPUID_FP87,
2927 .features[FEAT_1_ECX] =
2928 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2929 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2930 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2931 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2932 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2933 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2934 .features[FEAT_8000_0001_EDX] =
2935 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2936 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2937 .features[FEAT_8000_0001_ECX] =
2938 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2939 .features[FEAT_7_0_EBX] =
2940 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2941 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2942 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2943 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2944 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2945 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2946 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2947 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2948 .features[FEAT_7_0_ECX] =
2949 CPUID_7_0_ECX_PKU,
2950 /* Missing: XSAVES (not supported by some Linux versions,
2951 * including v4.1 to v4.12).
2952 * KVM doesn't yet expose any XSAVES state save component,
2953 * and the only one defined in Skylake (processor tracing)
2954 * probably will block migration anyway.
2955 */
2956 .features[FEAT_XSAVE] =
2957 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2958 CPUID_XSAVE_XGETBV1,
2959 .features[FEAT_6_EAX] =
2960 CPUID_6_EAX_ARAT,
2961 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2962 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2963 MSR_VMX_BASIC_TRUE_CTLS,
2964 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2965 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2966 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2967 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2968 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2969 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2970 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2971 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2972 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2973 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2974 .features[FEAT_VMX_EXIT_CTLS] =
2975 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2976 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2977 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2978 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2979 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2980 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2981 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2982 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2983 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2984 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2985 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2986 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2987 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2988 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2989 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2990 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2991 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2992 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2993 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2994 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2995 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2996 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2997 .features[FEAT_VMX_SECONDARY_CTLS] =
2998 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2999 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3000 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3001 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3002 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3003 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3004 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3005 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3006 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3007 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3008 .xlevel = 0x80000008,
3009 .model_id = "Intel Xeon Processor (Skylake)",
3010 .versions = (X86CPUVersionDefinition[]) {
3011 { .version = 1 },
3012 {
3013 .version = 2,
3014 .alias = "Skylake-Server-IBRS",
3015 .props = (PropValue[]) {
3016 /* clflushopt was not added to Skylake-Server-IBRS */
3017 /* TODO: add -v3 including clflushopt */
3018 { "clflushopt", "off" },
3019 { "spec-ctrl", "on" },
3020 { "model-id",
3021 "Intel Xeon Processor (Skylake, IBRS)" },
3022 { /* end of list */ }
3023 }
3024 },
3025 {
3026 .version = 3,
3027 .alias = "Skylake-Server-noTSX-IBRS",
3028 .props = (PropValue[]) {
3029 { "hle", "off" },
3030 { "rtm", "off" },
3031 { /* end of list */ }
3032 }
3033 },
3034 { /* end of list */ }
3035 }
3036 },
3037 {
3038 .name = "Cascadelake-Server",
3039 .level = 0xd,
3040 .vendor = CPUID_VENDOR_INTEL,
3041 .family = 6,
3042 .model = 85,
3043 .stepping = 6,
3044 .features[FEAT_1_EDX] =
3045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3049 CPUID_DE | CPUID_FP87,
3050 .features[FEAT_1_ECX] =
3051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3052 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3053 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3054 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3055 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3056 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3057 .features[FEAT_8000_0001_EDX] =
3058 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3059 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3060 .features[FEAT_8000_0001_ECX] =
3061 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3062 .features[FEAT_7_0_EBX] =
3063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3064 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3065 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3066 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3067 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3068 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3069 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3070 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3071 .features[FEAT_7_0_ECX] =
3072 CPUID_7_0_ECX_PKU |
3073 CPUID_7_0_ECX_AVX512VNNI,
3074 .features[FEAT_7_0_EDX] =
3075 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3076 /* Missing: XSAVES (not supported by some Linux versions,
3077 * including v4.1 to v4.12).
3078 * KVM doesn't yet expose any XSAVES state save component,
3079 * and the only one defined in Skylake (processor tracing)
3080 * probably will block migration anyway.
3081 */
3082 .features[FEAT_XSAVE] =
3083 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3084 CPUID_XSAVE_XGETBV1,
3085 .features[FEAT_6_EAX] =
3086 CPUID_6_EAX_ARAT,
3087 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3088 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3089 MSR_VMX_BASIC_TRUE_CTLS,
3090 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3091 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3092 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3093 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3094 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3095 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3096 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3097 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3098 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3099 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3100 .features[FEAT_VMX_EXIT_CTLS] =
3101 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3102 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3103 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3104 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3105 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3106 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3107 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3108 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3109 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3110 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3111 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3112 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3113 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3114 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3115 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3116 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3117 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3118 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3119 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3120 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3121 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3122 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3123 .features[FEAT_VMX_SECONDARY_CTLS] =
3124 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3125 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3126 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3127 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3128 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3129 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3130 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3131 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3132 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3133 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3134 .xlevel = 0x80000008,
3135 .model_id = "Intel Xeon Processor (Cascadelake)",
3136 .versions = (X86CPUVersionDefinition[]) {
3137 { .version = 1 },
3138 { .version = 2,
3139 .props = (PropValue[]) {
3140 { "arch-capabilities", "on" },
3141 { "rdctl-no", "on" },
3142 { "ibrs-all", "on" },
3143 { "skip-l1dfl-vmentry", "on" },
3144 { "mds-no", "on" },
3145 { /* end of list */ }
3146 },
3147 },
3148 { .version = 3,
3149 .alias = "Cascadelake-Server-noTSX",
3150 .props = (PropValue[]) {
3151 { "hle", "off" },
3152 { "rtm", "off" },
3153 { /* end of list */ }
3154 },
3155 },
3156 { /* end of list */ }
3157 }
3158 },
3159 {
3160 .name = "Cooperlake",
3161 .level = 0xd,
3162 .vendor = CPUID_VENDOR_INTEL,
3163 .family = 6,
3164 .model = 85,
3165 .stepping = 10,
3166 .features[FEAT_1_EDX] =
3167 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3168 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3169 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3170 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3171 CPUID_DE | CPUID_FP87,
3172 .features[FEAT_1_ECX] =
3173 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3174 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3175 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3176 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3177 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3178 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3179 .features[FEAT_8000_0001_EDX] =
3180 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3181 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3182 .features[FEAT_8000_0001_ECX] =
3183 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3184 .features[FEAT_7_0_EBX] =
3185 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3186 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3187 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3188 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3189 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3190 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3191 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3192 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3193 .features[FEAT_7_0_ECX] =
3194 CPUID_7_0_ECX_PKU |
3195 CPUID_7_0_ECX_AVX512VNNI,
3196 .features[FEAT_7_0_EDX] =
3197 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
3198 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
3199 .features[FEAT_ARCH_CAPABILITIES] =
3200 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
3201 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
3202 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO,
3203 .features[FEAT_7_1_EAX] =
3204 CPUID_7_1_EAX_AVX512_BF16,
3205 /*
3206 * Missing: XSAVES (not supported by some Linux versions,
3207 * including v4.1 to v4.12).
3208 * KVM doesn't yet expose any XSAVES state save component,
3209 * and the only one defined in Skylake (processor tracing)
3210 * probably will block migration anyway.
3211 */
3212 .features[FEAT_XSAVE] =
3213 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3214 CPUID_XSAVE_XGETBV1,
3215 .features[FEAT_6_EAX] =
3216 CPUID_6_EAX_ARAT,
3217 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3218 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3219 MSR_VMX_BASIC_TRUE_CTLS,
3220 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3221 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3222 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3223 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3224 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3225 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3226 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3227 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3228 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3229 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3230 .features[FEAT_VMX_EXIT_CTLS] =
3231 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3232 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3233 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3234 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3235 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3236 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3237 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3238 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3239 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3240 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3241 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3242 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3243 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3244 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3245 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3246 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3247 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3248 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3249 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3250 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3251 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3252 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3253 .features[FEAT_VMX_SECONDARY_CTLS] =
3254 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3255 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3256 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3257 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3258 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3259 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3260 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3261 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3262 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3263 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3264 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3265 .xlevel = 0x80000008,
3266 .model_id = "Intel Xeon Processor (Cooperlake)",
3267 },
3268 {
3269 .name = "Icelake-Client",
3270 .level = 0xd,
3271 .vendor = CPUID_VENDOR_INTEL,
3272 .family = 6,
3273 .model = 126,
3274 .stepping = 0,
3275 .features[FEAT_1_EDX] =
3276 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3277 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3278 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3279 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3280 CPUID_DE | CPUID_FP87,
3281 .features[FEAT_1_ECX] =
3282 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3283 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3284 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3285 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3286 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3287 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3288 .features[FEAT_8000_0001_EDX] =
3289 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
3290 CPUID_EXT2_SYSCALL,
3291 .features[FEAT_8000_0001_ECX] =
3292 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3293 .features[FEAT_8000_0008_EBX] =
3294 CPUID_8000_0008_EBX_WBNOINVD,
3295 .features[FEAT_7_0_EBX] =
3296 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3297 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3298 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3299 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3300 CPUID_7_0_EBX_SMAP,
3301 .features[FEAT_7_0_ECX] =
3302 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3303 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3304 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3305 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3306 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3307 .features[FEAT_7_0_EDX] =
3308 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3309 /* Missing: XSAVES (not supported by some Linux versions,
3310 * including v4.1 to v4.12).
3311 * KVM doesn't yet expose any XSAVES state save component,
3312 * and the only one defined in Skylake (processor tracing)
3313 * probably will block migration anyway.
3314 */
3315 .features[FEAT_XSAVE] =
3316 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3317 CPUID_XSAVE_XGETBV1,
3318 .features[FEAT_6_EAX] =
3319 CPUID_6_EAX_ARAT,
3320 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3321 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3322 MSR_VMX_BASIC_TRUE_CTLS,
3323 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3324 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3325 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3326 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3327 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3328 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3329 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3330 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3331 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3332 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3333 .features[FEAT_VMX_EXIT_CTLS] =
3334 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3335 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3336 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3337 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3338 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3339 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3340 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3341 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3342 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3343 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
3344 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3345 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3346 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3347 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3348 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3349 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3350 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3351 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3352 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3353 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3354 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3355 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3356 .features[FEAT_VMX_SECONDARY_CTLS] =
3357 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3358 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3359 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3360 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3361 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3362 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3363 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3364 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3365 .xlevel = 0x80000008,
3366 .model_id = "Intel Core Processor (Icelake)",
3367 .versions = (X86CPUVersionDefinition[]) {
3368 { .version = 1 },
3369 {
3370 .version = 2,
3371 .alias = "Icelake-Client-noTSX",
3372 .props = (PropValue[]) {
3373 { "hle", "off" },
3374 { "rtm", "off" },
3375 { /* end of list */ }
3376 },
3377 },
3378 { /* end of list */ }
3379 }
3380 },
3381 {
3382 .name = "Icelake-Server",
3383 .level = 0xd,
3384 .vendor = CPUID_VENDOR_INTEL,
3385 .family = 6,
3386 .model = 134,
3387 .stepping = 0,
3388 .features[FEAT_1_EDX] =
3389 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3390 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3391 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3392 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3393 CPUID_DE | CPUID_FP87,
3394 .features[FEAT_1_ECX] =
3395 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3396 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3397 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3398 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3399 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3400 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3401 .features[FEAT_8000_0001_EDX] =
3402 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3403 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3404 .features[FEAT_8000_0001_ECX] =
3405 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3406 .features[FEAT_8000_0008_EBX] =
3407 CPUID_8000_0008_EBX_WBNOINVD,
3408 .features[FEAT_7_0_EBX] =
3409 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3410 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3411 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3412 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3413 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3414 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3415 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3416 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3417 .features[FEAT_7_0_ECX] =
3418 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3419 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3420 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3421 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3422 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
3423 .features[FEAT_7_0_EDX] =
3424 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3425 /* Missing: XSAVES (not supported by some Linux versions,
3426 * including v4.1 to v4.12).
3427 * KVM doesn't yet expose any XSAVES state save component,
3428 * and the only one defined in Skylake (processor tracing)
3429 * probably will block migration anyway.
3430 */
3431 .features[FEAT_XSAVE] =
3432 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3433 CPUID_XSAVE_XGETBV1,
3434 .features[FEAT_6_EAX] =
3435 CPUID_6_EAX_ARAT,
3436 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3437 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3438 MSR_VMX_BASIC_TRUE_CTLS,
3439 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3440 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3441 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3442 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3443 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3444 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3445 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3446 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3447 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3448 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3449 .features[FEAT_VMX_EXIT_CTLS] =
3450 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3451 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3452 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3453 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3454 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3455 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3456 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3457 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3458 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3459 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3460 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3461 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3462 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3463 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3464 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3465 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3466 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3467 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3468 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3469 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3470 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3471 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3472 .features[FEAT_VMX_SECONDARY_CTLS] =
3473 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3474 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3475 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3476 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3477 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3478 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3479 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3480 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3481 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
3482 .xlevel = 0x80000008,
3483 .model_id = "Intel Xeon Processor (Icelake)",
3484 .versions = (X86CPUVersionDefinition[]) {
3485 { .version = 1 },
3486 {
3487 .version = 2,
3488 .alias = "Icelake-Server-noTSX",
3489 .props = (PropValue[]) {
3490 { "hle", "off" },
3491 { "rtm", "off" },
3492 { /* end of list */ }
3493 },
3494 },
3495 { /* end of list */ }
3496 }
3497 },
3498 {
3499 .name = "Denverton",
3500 .level = 21,
3501 .vendor = CPUID_VENDOR_INTEL,
3502 .family = 6,
3503 .model = 95,
3504 .stepping = 1,
3505 .features[FEAT_1_EDX] =
3506 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
3507 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
3508 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3509 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
3510 CPUID_SSE | CPUID_SSE2,
3511 .features[FEAT_1_ECX] =
3512 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3513 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
3514 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3515 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
3516 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
3517 .features[FEAT_8000_0001_EDX] =
3518 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
3519 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
3520 .features[FEAT_8000_0001_ECX] =
3521 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3522 .features[FEAT_7_0_EBX] =
3523 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
3524 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
3525 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
3526 .features[FEAT_7_0_EDX] =
3527 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
3528 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3529 /*
3530 * Missing: XSAVES (not supported by some Linux versions,
3531 * including v4.1 to v4.12).
3532 * KVM doesn't yet expose any XSAVES state save component,
3533 * and the only one defined in Skylake (processor tracing)
3534 * probably will block migration anyway.
3535 */
3536 .features[FEAT_XSAVE] =
3537 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
3538 .features[FEAT_6_EAX] =
3539 CPUID_6_EAX_ARAT,
3540 .features[FEAT_ARCH_CAPABILITIES] =
3541 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
3542 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3543 MSR_VMX_BASIC_TRUE_CTLS,
3544 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3545 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3546 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3547 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3548 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3549 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3550 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3551 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3552 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3553 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3554 .features[FEAT_VMX_EXIT_CTLS] =
3555 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3556 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3557 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3558 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3559 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3560 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3561 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3562 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3563 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3564 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3565 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3566 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3567 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3568 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3569 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3570 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3571 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3572 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3573 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3574 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3575 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3576 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3577 .features[FEAT_VMX_SECONDARY_CTLS] =
3578 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3579 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3580 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3581 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3582 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3583 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3584 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3585 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3586 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3587 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3588 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3589 .xlevel = 0x80000008,
3590 .model_id = "Intel Atom Processor (Denverton)",
3591 },
3592 {
3593 .name = "Snowridge",
3594 .level = 27,
3595 .vendor = CPUID_VENDOR_INTEL,
3596 .family = 6,
3597 .model = 134,
3598 .stepping = 1,
3599 .features[FEAT_1_EDX] =
3600 /* missing: CPUID_PN CPUID_IA64 */
3601 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
3602 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
3603 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
3604 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
3605 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3606 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
3607 CPUID_MMX |
3608 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
3609 .features[FEAT_1_ECX] =
3610 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3611 CPUID_EXT_SSSE3 |
3612 CPUID_EXT_CX16 |
3613 CPUID_EXT_SSE41 |
3614 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3615 CPUID_EXT_POPCNT |
3616 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
3617 CPUID_EXT_RDRAND,
3618 .features[FEAT_8000_0001_EDX] =
3619 CPUID_EXT2_SYSCALL |
3620 CPUID_EXT2_NX |
3621 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3622 CPUID_EXT2_LM,
3623 .features[FEAT_8000_0001_ECX] =
3624 CPUID_EXT3_LAHF_LM |
3625 CPUID_EXT3_3DNOWPREFETCH,
3626 .features[FEAT_7_0_EBX] =
3627 CPUID_7_0_EBX_FSGSBASE |
3628 CPUID_7_0_EBX_SMEP |
3629 CPUID_7_0_EBX_ERMS |
3630 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
3631 CPUID_7_0_EBX_RDSEED |
3632 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3633 CPUID_7_0_EBX_CLWB |
3634 CPUID_7_0_EBX_SHA_NI,
3635 .features[FEAT_7_0_ECX] =
3636 CPUID_7_0_ECX_UMIP |
3637 /* missing bit 5 */
3638 CPUID_7_0_ECX_GFNI |
3639 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
3640 CPUID_7_0_ECX_MOVDIR64B,
3641 .features[FEAT_7_0_EDX] =
3642 CPUID_7_0_EDX_SPEC_CTRL |
3643 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
3644 CPUID_7_0_EDX_CORE_CAPABILITY,
3645 .features[FEAT_CORE_CAPABILITY] =
3646 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
3647 /*
3648 * Missing: XSAVES (not supported by some Linux versions,
3649 * including v4.1 to v4.12).
3650 * KVM doesn't yet expose any XSAVES state save component,
3651 * and the only one defined in Skylake (processor tracing)
3652 * probably will block migration anyway.
3653 */
3654 .features[FEAT_XSAVE] =
3655 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3656 CPUID_XSAVE_XGETBV1,
3657 .features[FEAT_6_EAX] =
3658 CPUID_6_EAX_ARAT,
3659 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3660 MSR_VMX_BASIC_TRUE_CTLS,
3661 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3662 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3663 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3664 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3665 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3666 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3667 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3668 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3669 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3670 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3671 .features[FEAT_VMX_EXIT_CTLS] =
3672 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3673 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3674 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3675 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3676 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3677 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3678 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3679 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3680 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3681 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3682 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3683 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3684 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3685 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3686 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3687 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3688 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3689 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3690 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3691 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3692 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3693 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3694 .features[FEAT_VMX_SECONDARY_CTLS] =
3695 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3696 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3697 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3698 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3699 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3700 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3701 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3702 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3703 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3704 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3705 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3706 .xlevel = 0x80000008,
3707 .model_id = "Intel Atom Processor (SnowRidge)",
3708 .versions = (X86CPUVersionDefinition[]) {
3709 { .version = 1 },
3710 {
3711 .version = 2,
3712 .props = (PropValue[]) {
3713 { "mpx", "off" },
3714 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
3715 { /* end of list */ },
3716 },
3717 },
3718 { /* end of list */ },
3719 },
3720 },
3721 {
3722 .name = "KnightsMill",
3723 .level = 0xd,
3724 .vendor = CPUID_VENDOR_INTEL,
3725 .family = 6,
3726 .model = 133,
3727 .stepping = 0,
3728 .features[FEAT_1_EDX] =
3729 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
3730 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
3731 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
3732 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
3733 CPUID_PSE | CPUID_DE | CPUID_FP87,
3734 .features[FEAT_1_ECX] =
3735 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3736 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3737 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3738 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3739 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3740 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3741 .features[FEAT_8000_0001_EDX] =
3742 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3743 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3744 .features[FEAT_8000_0001_ECX] =
3745 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3746 .features[FEAT_7_0_EBX] =
3747 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3748 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
3749 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
3750 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
3751 CPUID_7_0_EBX_AVX512ER,
3752 .features[FEAT_7_0_ECX] =
3753 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3754 .features[FEAT_7_0_EDX] =
3755 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
3756 .features[FEAT_XSAVE] =
3757 CPUID_XSAVE_XSAVEOPT,
3758 .features[FEAT_6_EAX] =
3759 CPUID_6_EAX_ARAT,
3760 .xlevel = 0x80000008,
3761 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
3762 },
3763 {
3764 .name = "Opteron_G1",
3765 .level = 5,
3766 .vendor = CPUID_VENDOR_AMD,
3767 .family = 15,
3768 .model = 6,
3769 .stepping = 1,
3770 .features[FEAT_1_EDX] =
3771 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3772 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3773 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3774 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3775 CPUID_DE | CPUID_FP87,
3776 .features[FEAT_1_ECX] =
3777 CPUID_EXT_SSE3,
3778 .features[FEAT_8000_0001_EDX] =
3779 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3780 .xlevel = 0x80000008,
3781 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
3782 },
3783 {
3784 .name = "Opteron_G2",
3785 .level = 5,
3786 .vendor = CPUID_VENDOR_AMD,
3787 .family = 15,
3788 .model = 6,
3789 .stepping = 1,
3790 .features[FEAT_1_EDX] =
3791 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3792 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3793 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3794 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3795 CPUID_DE | CPUID_FP87,
3796 .features[FEAT_1_ECX] =
3797 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
3798 .features[FEAT_8000_0001_EDX] =
3799 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3800 .features[FEAT_8000_0001_ECX] =
3801 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3802 .xlevel = 0x80000008,
3803 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
3804 },
3805 {
3806 .name = "Opteron_G3",
3807 .level = 5,
3808 .vendor = CPUID_VENDOR_AMD,
3809 .family = 16,
3810 .model = 2,
3811 .stepping = 3,
3812 .features[FEAT_1_EDX] =
3813 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3814 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3815 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3816 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3817 CPUID_DE | CPUID_FP87,
3818 .features[FEAT_1_ECX] =
3819 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
3820 CPUID_EXT_SSE3,
3821 .features[FEAT_8000_0001_EDX] =
3822 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
3823 CPUID_EXT2_RDTSCP,
3824 .features[FEAT_8000_0001_ECX] =
3825 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
3826 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3827 .xlevel = 0x80000008,
3828 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
3829 },
3830 {
3831 .name = "Opteron_G4",
3832 .level = 0xd,
3833 .vendor = CPUID_VENDOR_AMD,
3834 .family = 21,
3835 .model = 1,
3836 .stepping = 2,
3837 .features[FEAT_1_EDX] =
3838 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3839 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3840 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3841 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3842 CPUID_DE | CPUID_FP87,
3843 .features[FEAT_1_ECX] =
3844 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3845 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3846 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
3847 CPUID_EXT_SSE3,
3848 .features[FEAT_8000_0001_EDX] =
3849 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3850 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3851 .features[FEAT_8000_0001_ECX] =
3852 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3853 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3854 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3855 CPUID_EXT3_LAHF_LM,
3856 .features[FEAT_SVM] =
3857 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3858 /* no xsaveopt! */
3859 .xlevel = 0x8000001A,
3860 .model_id = "AMD Opteron 62xx class CPU",
3861 },
3862 {
3863 .name = "Opteron_G5",
3864 .level = 0xd,
3865 .vendor = CPUID_VENDOR_AMD,
3866 .family = 21,
3867 .model = 2,
3868 .stepping = 0,
3869 .features[FEAT_1_EDX] =
3870 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3871 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3872 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3873 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3874 CPUID_DE | CPUID_FP87,
3875 .features[FEAT_1_ECX] =
3876 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
3877 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
3878 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
3879 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3880 .features[FEAT_8000_0001_EDX] =
3881 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3882 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3883 .features[FEAT_8000_0001_ECX] =
3884 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3885 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3886 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3887 CPUID_EXT3_LAHF_LM,
3888 .features[FEAT_SVM] =
3889 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3890 /* no xsaveopt! */
3891 .xlevel = 0x8000001A,
3892 .model_id = "AMD Opteron 63xx class CPU",
3893 },
3894 {
3895 .name = "EPYC",
3896 .level = 0xd,
3897 .vendor = CPUID_VENDOR_AMD,
3898 .family = 23,
3899 .model = 1,
3900 .stepping = 2,
3901 .features[FEAT_1_EDX] =
3902 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3903 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3904 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3905 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3906 CPUID_VME | CPUID_FP87,
3907 .features[FEAT_1_ECX] =
3908 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3909 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
3910 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3911 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3912 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3913 .features[FEAT_8000_0001_EDX] =
3914 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3915 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3916 CPUID_EXT2_SYSCALL,
3917 .features[FEAT_8000_0001_ECX] =
3918 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3919 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3920 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3921 CPUID_EXT3_TOPOEXT,
3922 .features[FEAT_7_0_EBX] =
3923 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3924 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3925 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3926 CPUID_7_0_EBX_SHA_NI,
3927 /* Missing: XSAVES (not supported by some Linux versions,
3928 * including v4.1 to v4.12).
3929 * KVM doesn't yet expose any XSAVES state save component.
3930 */
3931 .features[FEAT_XSAVE] =
3932 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3933 CPUID_XSAVE_XGETBV1,
3934 .features[FEAT_6_EAX] =
3935 CPUID_6_EAX_ARAT,
3936 .features[FEAT_SVM] =
3937 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3938 .xlevel = 0x8000001E,
3939 .model_id = "AMD EPYC Processor",
3940 .cache_info = &epyc_cache_info,
3941 .versions = (X86CPUVersionDefinition[]) {
3942 { .version = 1 },
3943 {
3944 .version = 2,
3945 .alias = "EPYC-IBPB",
3946 .props = (PropValue[]) {
3947 { "ibpb", "on" },
3948 { "model-id",
3949 "AMD EPYC Processor (with IBPB)" },
3950 { /* end of list */ }
3951 }
3952 },
3953 { /* end of list */ }
3954 }
3955 },
3956 {
3957 .name = "Dhyana",
3958 .level = 0xd,
3959 .vendor = CPUID_VENDOR_HYGON,
3960 .family = 24,
3961 .model = 0,
3962 .stepping = 1,
3963 .features[FEAT_1_EDX] =
3964 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3965 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3966 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3967 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3968 CPUID_VME | CPUID_FP87,
3969 .features[FEAT_1_ECX] =
3970 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3971 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3972 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3973 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3974 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3975 .features[FEAT_8000_0001_EDX] =
3976 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3977 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3978 CPUID_EXT2_SYSCALL,
3979 .features[FEAT_8000_0001_ECX] =
3980 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3981 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3982 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3983 CPUID_EXT3_TOPOEXT,
3984 .features[FEAT_8000_0008_EBX] =
3985 CPUID_8000_0008_EBX_IBPB,
3986 .features[FEAT_7_0_EBX] =
3987 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3988 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3989 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3990 /*
3991 * Missing: XSAVES (not supported by some Linux versions,
3992 * including v4.1 to v4.12).
3993 * KVM doesn't yet expose any XSAVES state save component.
3994 */
3995 .features[FEAT_XSAVE] =
3996 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3997 CPUID_XSAVE_XGETBV1,
3998 .features[FEAT_6_EAX] =
3999 CPUID_6_EAX_ARAT,
4000 .features[FEAT_SVM] =
4001 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4002 .xlevel = 0x8000001E,
4003 .model_id = "Hygon Dhyana Processor",
4004 .cache_info = &epyc_cache_info,
4005 },
4006 };
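/*
 * Illustrative note: each versioned definition above is also exposed as
 * "<model>-v<N>", and an .alias, when present, names one specific version.
 * For example, "-cpu Snowridge-v2" should give the base Snowridge
 * definition with "mpx" turned off and the adjusted model-id from its
 * PropValue list, and "Cascadelake-Server-noTSX" resolves to version 3 of
 * Cascadelake-Server.
 */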
4007
4008 /* KVM-specific features that are automatically added/removed
4009 * from all CPU models when KVM is enabled.
4010 */
4011 static PropValue kvm_default_props[] = {
4012 { "kvmclock", "on" },
4013 { "kvm-nopiodelay", "on" },
4014 { "kvm-asyncpf", "on" },
4015 { "kvm-steal-time", "on" },
4016 { "kvm-pv-eoi", "on" },
4017 { "kvmclock-stable-bit", "on" },
4018 { "x2apic", "on" },
4019 { "acpi", "off" },
4020 { "monitor", "off" },
4021 { "svm", "off" },
4022 { NULL, NULL },
4023 };
4024
4025 /* TCG-specific defaults that override all CPU models when using TCG
4026 */
4027 static PropValue tcg_default_props[] = {
4028 { "vme", "off" },
4029 { NULL, NULL },
4030 };
4031
4032
4033 /*
4034 * We resolve CPU model aliases using -v1 when using "-machine
4035 * none", but this is just for compatibility while libvirt isn't
4036 * adapted to resolve CPU model versions before creating VMs.
4037 * See "Runnability guarantee of CPU models" at qemu-deprecated.texi.
4038 */
4039 X86CPUVersion default_cpu_version = 1;
4040
4041 void x86_cpu_set_default_version(X86CPUVersion version)
4042 {
4043 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
4044 assert(version != CPU_VERSION_AUTO);
4045 default_cpu_version = version;
4046 }
4047
4048 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
4049 {
4050 int v = 0;
4051 const X86CPUVersionDefinition *vdef =
4052 x86_cpu_def_get_versions(model->cpudef);
4053 while (vdef->version) {
4054 v = vdef->version;
4055 vdef++;
4056 }
4057 return v;
4058 }
4059
4060 /* Return the actual version being used for a specific CPU model */
4061 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
4062 {
4063 X86CPUVersion v = model->version;
4064 if (v == CPU_VERSION_AUTO) {
4065 v = default_cpu_version;
4066 }
4067 if (v == CPU_VERSION_LATEST) {
4068 return x86_cpu_model_last_version(model);
4069 }
4070 return v;
4071 }
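/*
 * Worked example: with default_cpu_version == 1, a model requested without
 * an explicit version (CPU_VERSION_AUTO) resolves to version 1, while
 * CPU_VERSION_LATEST picks the highest .version listed for the model,
 * e.g. 3 for Cascadelake-Server above.
 */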
4072
4073 void x86_cpu_change_kvm_default(const char *prop, const char *value)
4074 {
4075 PropValue *pv;
4076 for (pv = kvm_default_props; pv->prop; pv++) {
4077 if (!strcmp(pv->prop, prop)) {
4078 pv->value = value;
4079 break;
4080 }
4081 }
4082
4083 /* It is valid to call this function only for properties that
4084 * are already present in the kvm_default_props table.
4085 */
4086 assert(pv->prop);
4087 }
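/*
 * Hypothetical usage sketch: a board or compat hook could flip one of the
 * defaults listed in kvm_default_props before CPUs are created, e.g.
 *
 *     x86_cpu_change_kvm_default("kvm-steal-time", "off");
 *
 * Passing a property that is not already in the table would trip the
 * assert() above, so the table is the single source of truth for which
 * defaults can be overridden this way.
 */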
4088
4089 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4090 bool migratable_only);
4091
4092 static bool lmce_supported(void)
4093 {
4094 uint64_t mce_cap = 0;
4095
4096 #ifdef CONFIG_KVM
4097 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
4098 return false;
4099 }
4100 #endif
4101
4102 return !!(mce_cap & MCG_LMCE_P);
4103 }
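/*
 * Note: when built without CONFIG_KVM, mce_cap stays 0 above, so LMCE is
 * simply reported as unsupported.
 */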
4104
4105 #define CPUID_MODEL_ID_SZ 48
4106
4107 /**
4108 * cpu_x86_fill_model_id:
4109 * Get CPUID model ID string from host CPU.
4110 *
4111 * @str should have at least CPUID_MODEL_ID_SZ bytes
4112 *
4113 * The function does NOT add a null terminator to the string
4114 * automatically.
4115 */
4116 static int cpu_x86_fill_model_id(char *str)
4117 {
4118 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
4119 int i;
4120
4121 for (i = 0; i < 3; i++) {
4122 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
4123 memcpy(str + i * 16 + 0, &eax, 4);
4124 memcpy(str + i * 16 + 4, &ebx, 4);
4125 memcpy(str + i * 16 + 8, &ecx, 4);
4126 memcpy(str + i * 16 + 12, &edx, 4);
4127 }
4128 return 0;
4129 }
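/*
 * Typical call pattern (see max_x86_cpu_initfn() below): the caller
 * reserves CPUID_MODEL_ID_SZ + 1 bytes and zero-initializes the buffer,
 * which is what provides the terminating NUL:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);
 */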
4130
4131 static Property max_x86_cpu_properties[] = {
4132 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
4133 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
4134 DEFINE_PROP_END_OF_LIST()
4135 };
4136
4137 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
4138 {
4139 DeviceClass *dc = DEVICE_CLASS(oc);
4140 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4141
4142 xcc->ordering = 9;
4143
4144 xcc->model_description =
4145 "Enables all features supported by the accelerator in the current host";
4146
4147 device_class_set_props(dc, max_x86_cpu_properties);
4148 }
4149
4150 static void max_x86_cpu_initfn(Object *obj)
4151 {
4152 X86CPU *cpu = X86_CPU(obj);
4153 CPUX86State *env = &cpu->env;
4154 KVMState *s = kvm_state;
4155
4156 /* We can't fill the features array here because we don't know yet if
4157 * "migratable" is true or false.
4158 */
4159 cpu->max_features = true;
4160
4161 if (accel_uses_host_cpuid()) {
4162 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
4163 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
4164 int family, model, stepping;
4165
4166 host_vendor_fms(vendor, &family, &model, &stepping);
4167 cpu_x86_fill_model_id(model_id);
4168
4169 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
4170 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
4171 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
4172 object_property_set_int(OBJECT(cpu), stepping, "stepping",
4173 &error_abort);
4174 object_property_set_str(OBJECT(cpu), model_id, "model-id",
4175 &error_abort);
4176
4177 if (kvm_enabled()) {
4178 env->cpuid_min_level =
4179 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
4180 env->cpuid_min_xlevel =
4181 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
4182 env->cpuid_min_xlevel2 =
4183 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
4184 } else {
4185 env->cpuid_min_level =
4186 hvf_get_supported_cpuid(0x0, 0, R_EAX);
4187 env->cpuid_min_xlevel =
4188 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
4189 env->cpuid_min_xlevel2 =
4190 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
4191 }
4192
4193 if (lmce_supported()) {
4194 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
4195 }
4196 } else {
4197 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
4198 "vendor", &error_abort);
4199 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
4200 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
4201 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
4202 object_property_set_str(OBJECT(cpu),
4203 "QEMU TCG CPU version " QEMU_HW_VERSION,
4204 "model-id", &error_abort);
4205 }
4206
4207 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
4208 }
4209
4210 static const TypeInfo max_x86_cpu_type_info = {
4211 .name = X86_CPU_TYPE_NAME("max"),
4212 .parent = TYPE_X86_CPU,
4213 .instance_init = max_x86_cpu_initfn,
4214 .class_init = max_x86_cpu_class_init,
4215 };
4216
4217 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4218 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
4219 {
4220 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4221
4222 xcc->host_cpuid_required = true;
4223 xcc->ordering = 8;
4224
4225 #if defined(CONFIG_KVM)
4226 xcc->model_description =
4227 "KVM processor with all supported host features ";
4228 #elif defined(CONFIG_HVF)
4229 xcc->model_description =
4230 "HVF processor with all supported host features ";
4231 #endif
4232 }
4233
4234 static const TypeInfo host_x86_cpu_type_info = {
4235 .name = X86_CPU_TYPE_NAME("host"),
4236 .parent = X86_CPU_TYPE_NAME("max"),
4237 .class_init = host_x86_cpu_class_init,
4238 };
4239
4240 #endif
4241
4242 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
4243 {
4244 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
4245
4246 switch (f->type) {
4247 case CPUID_FEATURE_WORD:
4248 {
4249 const char *reg = get_register_name_32(f->cpuid.reg);
4250 assert(reg);
4251 return g_strdup_printf("CPUID.%02XH:%s",
4252 f->cpuid.eax, reg);
4253 }
4254 case MSR_FEATURE_WORD:
4255 return g_strdup_printf("MSR(%02XH)",
4256 f->msr.index);
4257 }
4258
4259 return NULL;
4260 }
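/*
 * For example, a CPUID-based feature word is rendered as "CPUID.07H:EBX"
 * and an MSR-based one as e.g. "MSR(10AH)"; mark_unavailable_features()
 * below appends ".<feature-name> [bit N]" when warning about features that
 * had to be filtered out.
 */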
4261
4262 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
4263 {
4264 FeatureWord w;
4265
4266 for (w = 0; w < FEATURE_WORDS; w++) {
4267 if (cpu->filtered_features[w]) {
4268 return true;
4269 }
4270 }
4271
4272 return false;
4273 }
4274
4275 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
4276 const char *verbose_prefix)
4277 {
4278 CPUX86State *env = &cpu->env;
4279 FeatureWordInfo *f = &feature_word_info[w];
4280 int i;
4281
4282 if (!cpu->force_features) {
4283 env->features[w] &= ~mask;
4284 }
4285 cpu->filtered_features[w] |= mask;
4286
4287 if (!verbose_prefix) {
4288 return;
4289 }
4290
4291 for (i = 0; i < 64; ++i) {
4292 if ((1ULL << i) & mask) {
4293 g_autofree char *feat_word_str = feature_word_description(f, i);
4294 warn_report("%s: %s%s%s [bit %d]",
4295 verbose_prefix,
4296 feat_word_str,
4297 f->feat_names[i] ? "." : "",
4298 f->feat_names[i] ? f->feat_names[i] : "", i);
4299 }
4300 }
4301 }
4302
4303 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
4304 const char *name, void *opaque,
4305 Error **errp)
4306 {
4307 X86CPU *cpu = X86_CPU(obj);
4308 CPUX86State *env = &cpu->env;
4309 int64_t value;
4310
4311 value = (env->cpuid_version >> 8) & 0xf;
4312 if (value == 0xf) {
4313 value += (env->cpuid_version >> 20) & 0xff;
4314 }
4315 visit_type_int(v, name, &value, errp);
4316 }
4317
4318 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
4319 const char *name, void *opaque,
4320 Error **errp)
4321 {
4322 X86CPU *cpu = X86_CPU(obj);
4323 CPUX86State *env = &cpu->env;
4324 const int64_t min = 0;
4325 const int64_t max = 0xff + 0xf;
4326 Error *local_err = NULL;
4327 int64_t value;
4328
4329 visit_type_int(v, name, &value, &local_err);
4330 if (local_err) {
4331 error_propagate(errp, local_err);
4332 return;
4333 }
4334 if (value < min || value > max) {
4335 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4336 name ? name : "null", value, min, max);
4337 return;
4338 }
4339
4340 env->cpuid_version &= ~0xff00f00;
4341 if (value > 0x0f) {
4342 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
4343 } else {
4344 env->cpuid_version |= value << 8;
4345 }
4346 }
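/*
 * Worked example: setting "family" to 21 (0x15, as used by the Opteron_G4
 * and Opteron_G5 definitions above) exceeds 0x0f, so the base family field
 * (bits 11:8) is set to 0xf and the extended family field (bits 27:20) to
 * 21 - 15 = 6; the getter above then reconstructs 0xf + 6 == 21.
 */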
4347
4348 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
4349 const char *name, void *opaque,
4350 Error **errp)
4351 {
4352 X86CPU *cpu = X86_CPU(obj);
4353 CPUX86State *env = &cpu->env;
4354 int64_t value;
4355
4356 value = (env->cpuid_version >> 4) & 0xf;
4357 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
4358 visit_type_int(v, name, &value, errp);
4359 }
4360
4361 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
4362 const char *name, void *opaque,
4363 Error **errp)
4364 {
4365 X86CPU *cpu = X86_CPU(obj);
4366 CPUX86State *env = &cpu->env;
4367 const int64_t min = 0;
4368 const int64_t max = 0xff;
4369 Error *local_err = NULL;
4370 int64_t value;
4371
4372 visit_type_int(v, name, &value, &local_err);
4373 if (local_err) {
4374 error_propagate(errp, local_err);
4375 return;
4376 }
4377 if (value < min || value > max) {
4378 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4379 name ? name : "null", value, min, max);
4380 return;
4381 }
4382
4383 env->cpuid_version &= ~0xf00f0;
4384 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
4385 }
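/*
 * Worked example: setting "model" to 85 (0x55, as used by the
 * Cascadelake-Server and Cooperlake definitions above) stores the low
 * nibble 0x5 in bits 7:4 and the high nibble 0x5 in the extended model
 * field (bits 19:16); the getter above recombines them into 0x55.
 */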
4386
4387 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
4388 const char *name, void *opaque,
4389 Error **errp)
4390 {
4391 X86CPU *cpu = X86_CPU(obj);
4392 CPUX86State *env = &cpu->env;
4393 int64_t value;
4394
4395 value = env->cpuid_version & 0xf;
4396 visit_type_int(v, name, &value, errp);
4397 }
4398
4399 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
4400 const char *name, void *opaque,
4401 Error **errp)
4402 {
4403 X86CPU *cpu = X86_CPU(obj);
4404 CPUX86State *env = &cpu->env;
4405 const int64_t min = 0;
4406 const int64_t max = 0xf;
4407 Error *local_err = NULL;
4408 int64_t value;
4409
4410 visit_type_int(v, name, &value, &local_err);
4411 if (local_err) {
4412 error_propagate(errp, local_err);
4413 return;
4414 }
4415 if (value < min || value > max) {
4416 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4417 name ? name : "null", value, min, max);
4418 return;
4419 }
4420
4421 env->cpuid_version &= ~0xf;
4422 env->cpuid_version |= value & 0xf;
4423 }
4424
4425 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
4426 {
4427 X86CPU *cpu = X86_CPU(obj);
4428 CPUX86State *env = &cpu->env;
4429 char *value;
4430
4431 value = g_malloc(CPUID_VENDOR_SZ + 1);
4432 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
4433 env->cpuid_vendor3);
4434 return value;
4435 }
4436
4437 static void x86_cpuid_set_vendor(Object *obj, const char *value,
4438 Error **errp)
4439 {
4440 X86CPU *cpu = X86_CPU(obj);
4441 CPUX86State *env = &cpu->env;
4442 int i;
4443
4444 if (strlen(value) != CPUID_VENDOR_SZ) {
4445 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
4446 return;
4447 }
4448
4449 env->cpuid_vendor1 = 0;
4450 env->cpuid_vendor2 = 0;
4451 env->cpuid_vendor3 = 0;
4452 for (i = 0; i < 4; i++) {
4453 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
4454 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
4455 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
4456 }
4457 }
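/*
 * Example: the 12-character string "GenuineIntel" is packed little-endian,
 * four bytes per register, so cpuid_vendor1 becomes
 * 'G' | 'e' << 8 | 'n' << 16 | 'u' << 24 == 0x756e6547 (the EBX value
 * returned by CPUID leaf 0), with "ineI" and "ntel" going to vendor2 and
 * vendor3 respectively.
 */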
4458
4459 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
4460 {
4461 X86CPU *cpu = X86_CPU(obj);
4462 CPUX86State *env = &cpu->env;
4463 char *value;
4464 int i;
4465
4466 value = g_malloc(48 + 1);
4467 for (i = 0; i < 48; i++) {
4468 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
4469 }
4470 value[48] = '\0';
4471 return value;
4472 }
4473
4474 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
4475 Error **errp)
4476 {
4477 X86CPU *cpu = X86_CPU(obj);
4478 CPUX86State *env = &cpu->env;
4479 int c, len, i;
4480
4481 if (model_id == NULL) {
4482 model_id = "";
4483 }
4484 len = strlen(model_id);
4485 memset(env->cpuid_model, 0, 48);
4486 for (i = 0; i < 48; i++) {
4487 if (i >= len) {
4488 c = '\0';
4489 } else {
4490 c = (uint8_t)model_id[i];
4491 }
4492 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
4493 }
4494 }
4495
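/*
 * For illustration, "model-id" is the 48-byte brand string that CPUID
 * leaves 0x80000002..0x80000004 return (16 bytes per leaf, four
 * little-endian bytes per register; see the handling of those leaves in
 * cpu_x86_cpuid() below).  The setter above therefore stores character i
 * in byte (i & 3) of cpuid_model[i >> 2], so the first four characters of
 * the string come back in CPUID[0x80000002].EAX.
 */
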
4496 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
4497 void *opaque, Error **errp)
4498 {
4499 X86CPU *cpu = X86_CPU(obj);
4500 int64_t value;
4501
4502 value = cpu->env.tsc_khz * 1000;
4503 visit_type_int(v, name, &value, errp);
4504 }
4505
4506 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
4507 void *opaque, Error **errp)
4508 {
4509 X86CPU *cpu = X86_CPU(obj);
4510 const int64_t min = 0;
4511 const int64_t max = INT64_MAX;
4512 Error *local_err = NULL;
4513 int64_t value;
4514
4515 visit_type_int(v, name, &value, &local_err);
4516 if (local_err) {
4517 error_propagate(errp, local_err);
4518 return;
4519 }
4520 if (value < min || value > max) {
4521 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4522 name ? name : "null", value, min, max);
4523 return;
4524 }
4525
4526 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
4527 }
4528
4529 /* Generic getter for "feature-words" and "filtered-features" properties */
4530 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
4531 const char *name, void *opaque,
4532 Error **errp)
4533 {
4534 uint64_t *array = (uint64_t *)opaque;
4535 FeatureWord w;
4536 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
4537 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
4538 X86CPUFeatureWordInfoList *list = NULL;
4539
4540 for (w = 0; w < FEATURE_WORDS; w++) {
4541 FeatureWordInfo *wi = &feature_word_info[w];
4542 /*
4543 * We didn't have MSR features when "feature-words" was
4544 * introduced, so entries of other types are skipped here.
4545 */
4546 if (wi->type != CPUID_FEATURE_WORD) {
4547 continue;
4548 }
4549 X86CPUFeatureWordInfo *qwi = &word_infos[w];
4550 qwi->cpuid_input_eax = wi->cpuid.eax;
4551 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
4552 qwi->cpuid_input_ecx = wi->cpuid.ecx;
4553 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
4554 qwi->features = array[w];
4555
4556 /* List will be in reverse order, but order shouldn't matter */
4557 list_entries[w].next = list;
4558 list_entries[w].value = &word_infos[w];
4559 list = &list_entries[w];
4560 }
4561
4562 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
4563 }
4564
4565 /* Convert all '_' in a feature string option name to '-', so the feature
4566 * name conforms to the QOM property naming rule, which uses '-' instead of '_'.
4567 */
4568 static inline void feat2prop(char *s)
4569 {
4570 while ((s = strchr(s, '_'))) {
4571 *s = '-';
4572 }
4573 }
4574
4575 /* Return the feature property name for a feature flag bit */
4576 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
4577 {
4578 const char *name;
4579 /* XSAVE components are automatically enabled by other features,
4580 * so return the original feature name instead
4581 */
4582 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
4583 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
4584
4585 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
4586 x86_ext_save_areas[comp].bits) {
4587 w = x86_ext_save_areas[comp].feature;
4588 bitnr = ctz32(x86_ext_save_areas[comp].bits);
4589 }
4590 }
4591
4592 assert(bitnr < 64);
4593 assert(w < FEATURE_WORDS);
4594 name = feature_word_info[w].feat_names[bitnr];
4595 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
4596 return name;
4597 }
4598
4599 /* Compatibility hack to maintain the legacy +-feat semantics,
4600 * where +-feat overwrites any feature set by
4601 * feat=on|feat even if the latter is parsed after +-feat
4602 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
4603 */
4604 static GList *plus_features, *minus_features;
4605
4606 static gint compare_string(gconstpointer a, gconstpointer b)
4607 {
4608 return g_strcmp0(a, b);
4609 }
4610
4611 /* Parse "+feature,-feature,feature=foo" CPU feature string
4612 */
4613 static void x86_cpu_parse_featurestr(const char *typename, char *features,
4614 Error **errp)
4615 {
4616 char *featurestr; /* Single "key=value" string being parsed */
4617 static bool cpu_globals_initialized;
4618 bool ambiguous = false;
4619
4620 if (cpu_globals_initialized) {
4621 return;
4622 }
4623 cpu_globals_initialized = true;
4624
4625 if (!features) {
4626 return;
4627 }
4628
4629 for (featurestr = strtok(features, ",");
4630 featurestr;
4631 featurestr = strtok(NULL, ",")) {
4632 const char *name;
4633 const char *val = NULL;
4634 char *eq = NULL;
4635 char num[32];
4636 GlobalProperty *prop;
4637
4638 /* Compatibility syntax: */
4639 if (featurestr[0] == '+') {
4640 plus_features = g_list_append(plus_features,
4641 g_strdup(featurestr + 1));
4642 continue;
4643 } else if (featurestr[0] == '-') {
4644 minus_features = g_list_append(minus_features,
4645 g_strdup(featurestr + 1));
4646 continue;
4647 }
4648
4649 eq = strchr(featurestr, '=');
4650 if (eq) {
4651 *eq++ = 0;
4652 val = eq;
4653 } else {
4654 val = "on";
4655 }
4656
4657 feat2prop(featurestr);
4658 name = featurestr;
4659
4660 if (g_list_find_custom(plus_features, name, compare_string)) {
4661 warn_report("Ambiguous CPU model string. "
4662 "Don't mix both \"+%s\" and \"%s=%s\"",
4663 name, name, val);
4664 ambiguous = true;
4665 }
4666 if (g_list_find_custom(minus_features, name, compare_string)) {
4667 warn_report("Ambiguous CPU model string. "
4668 "Don't mix both \"-%s\" and \"%s=%s\"",
4669 name, name, val);
4670 ambiguous = true;
4671 }
4672
4673 /* Special case: */
4674 if (!strcmp(name, "tsc-freq")) {
4675 int ret;
4676 uint64_t tsc_freq;
4677
4678 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
4679 if (ret < 0 || tsc_freq > INT64_MAX) {
4680 error_setg(errp, "bad numerical value %s", val);
4681 return;
4682 }
4683 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
4684 val = num;
4685 name = "tsc-frequency";
4686 }
4687
4688 prop = g_new0(typeof(*prop), 1);
4689 prop->driver = typename;
4690 prop->property = g_strdup(name);
4691 prop->value = g_strdup(val);
4692 qdev_prop_register_global(prop);
4693 }
4694
4695 if (ambiguous) {
4696 warn_report("Compatibility of ambiguous CPU model "
4697 "strings won't be kept on future QEMU versions");
4698 }
4699 }
4700
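/*
 * A sketch of how a "-cpu" option string might be handled here, assuming a
 * command line such as "-cpu Haswell,+avx2,-x2apic,pmu=on,tsc_freq=2.5G"
 * (the CPU model name itself is handled by the caller, not by this parser):
 *
 *   "+avx2"         -> appended to plus_features   (legacy syntax)
 *   "-x2apic"       -> appended to minus_features  (legacy syntax)
 *   "pmu=on"        -> registered as global property { <typename>, "pmu", "on" }
 *   "tsc_freq=2.5G" -> '_' rewritten to '-', the value parsed with
 *                      qemu_strtosz_metric() and registered as
 *                      "tsc-frequency" = "2500000000"
 *
 * The plus/minus lists are only applied later, in x86_cpu_expand_features().
 */
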
4701 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
4702 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
4703
4704 /* Build a list with the name of all features on a feature word array */
4705 static void x86_cpu_list_feature_names(FeatureWordArray features,
4706 strList **feat_names)
4707 {
4708 FeatureWord w;
4709 strList **next = feat_names;
4710
4711 for (w = 0; w < FEATURE_WORDS; w++) {
4712 uint64_t filtered = features[w];
4713 int i;
4714 for (i = 0; i < 64; i++) {
4715 if (filtered & (1ULL << i)) {
4716 strList *new = g_new0(strList, 1);
4717 new->value = g_strdup(x86_cpu_feature_name(w, i));
4718 *next = new;
4719 next = &new->next;
4720 }
4721 }
4722 }
4723 }
4724
4725 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
4726 const char *name, void *opaque,
4727 Error **errp)
4728 {
4729 X86CPU *xc = X86_CPU(obj);
4730 strList *result = NULL;
4731
4732 x86_cpu_list_feature_names(xc->filtered_features, &result);
4733 visit_type_strList(v, "unavailable-features", &result, errp);
4734 }
4735
4736 /* Check for missing features that may prevent the CPU class from
4737 * running using the current machine and accelerator.
4738 */
4739 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
4740 strList **missing_feats)
4741 {
4742 X86CPU *xc;
4743 Error *err = NULL;
4744 strList **next = missing_feats;
4745
4746 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4747 strList *new = g_new0(strList, 1);
4748 new->value = g_strdup("kvm");
4749 *missing_feats = new;
4750 return;
4751 }
4752
4753 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
4754
4755 x86_cpu_expand_features(xc, &err);
4756 if (err) {
4757 /* Errors at x86_cpu_expand_features should never happen,
4758 * but in case they do, just report the model as not
4759 * runnable at all using the "type" property.
4760 */
4761 strList *new = g_new0(strList, 1);
4762 new->value = g_strdup("type");
4763 *next = new;
4764 next = &new->next;
4765 }
4766
4767 x86_cpu_filter_features(xc, false);
4768
4769 x86_cpu_list_feature_names(xc->filtered_features, next);
4770
4771 object_unref(OBJECT(xc));
4772 }
4773
4774 /* Print all CPUID feature names in the given feature list
4775 */
4776 static void listflags(GList *features)
4777 {
4778 size_t len = 0;
4779 GList *tmp;
4780
4781 for (tmp = features; tmp; tmp = tmp->next) {
4782 const char *name = tmp->data;
4783 if ((len + strlen(name) + 1) >= 75) {
4784 qemu_printf("\n");
4785 len = 0;
4786 }
4787 qemu_printf("%s%s", len == 0 ? "  " : " ", name);
4788 len += strlen(name) + 1;
4789 }
4790 qemu_printf("\n");
4791 }
4792
4793 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
4794 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
4795 {
4796 ObjectClass *class_a = (ObjectClass *)a;
4797 ObjectClass *class_b = (ObjectClass *)b;
4798 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
4799 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
4800 int ret;
4801
4802 if (cc_a->ordering != cc_b->ordering) {
4803 ret = cc_a->ordering - cc_b->ordering;
4804 } else {
4805 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
4806 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
4807 ret = strcmp(name_a, name_b);
4808 }
4809 return ret;
4810 }
4811
4812 static GSList *get_sorted_cpu_model_list(void)
4813 {
4814 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
4815 list = g_slist_sort(list, x86_cpu_list_compare);
4816 return list;
4817 }
4818
4819 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
4820 {
4821 Object *obj = object_new_with_class(OBJECT_CLASS(xc));
4822 char *r = object_property_get_str(obj, "model-id", &error_abort);
4823 object_unref(obj);
4824 return r;
4825 }
4826
4827 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
4828 {
4829 X86CPUVersion version;
4830
4831 if (!cc->model || !cc->model->is_alias) {
4832 return NULL;
4833 }
4834 version = x86_cpu_model_resolve_version(cc->model);
4835 if (version <= 0) {
4836 return NULL;
4837 }
4838 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
4839 }
4840
4841 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
4842 {
4843 ObjectClass *oc = data;
4844 X86CPUClass *cc = X86_CPU_CLASS(oc);
4845 g_autofree char *name = x86_cpu_class_get_model_name(cc);
4846 g_autofree char *desc = g_strdup(cc->model_description);
4847 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
4848
4849 if (!desc && alias_of) {
4850 if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
4851 desc = g_strdup("(alias configured by machine type)");
4852 } else {
4853 desc = g_strdup_printf("(alias of %s)", alias_of);
4854 }
4855 }
4856 if (!desc) {
4857 desc = x86_cpu_class_get_model_id(cc);
4858 }
4859
4860 qemu_printf("x86 %-20s %-48s\n", name, desc);
4861 }
4862
4863 /* list available CPU models and flags */
4864 void x86_cpu_list(void)
4865 {
4866 int i, j;
4867 GSList *list;
4868 GList *names = NULL;
4869
4870 qemu_printf("Available CPUs:\n");
4871 list = get_sorted_cpu_model_list();
4872 g_slist_foreach(list, x86_cpu_list_entry, NULL);
4873 g_slist_free(list);
4874
4875 names = NULL;
4876 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
4877 FeatureWordInfo *fw = &feature_word_info[i];
4878 for (j = 0; j < 64; j++) {
4879 if (fw->feat_names[j]) {
4880 names = g_list_append(names, (gpointer)fw->feat_names[j]);
4881 }
4882 }
4883 }
4884
4885 names = g_list_sort(names, (GCompareFunc)strcmp);
4886
4887 qemu_printf("\nRecognized CPUID flags:\n");
4888 listflags(names);
4889 qemu_printf("\n");
4890 g_list_free(names);
4891 }
4892
4893 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
4894 {
4895 ObjectClass *oc = data;
4896 X86CPUClass *cc = X86_CPU_CLASS(oc);
4897 CpuDefinitionInfoList **cpu_list = user_data;
4898 CpuDefinitionInfoList *entry;
4899 CpuDefinitionInfo *info;
4900
4901 info = g_malloc0(sizeof(*info));
4902 info->name = x86_cpu_class_get_model_name(cc);
4903 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
4904 info->has_unavailable_features = true;
4905 info->q_typename = g_strdup(object_class_get_name(oc));
4906 info->migration_safe = cc->migration_safe;
4907 info->has_migration_safe = true;
4908 info->q_static = cc->static_model;
4909 /*
4910 * Old machine types won't report aliases, so that alias translation
4911 * doesn't break compatibility with previous QEMU versions.
4912 */
4913 if (default_cpu_version != CPU_VERSION_LEGACY) {
4914 info->alias_of = x86_cpu_class_get_alias_of(cc);
4915 info->has_alias_of = !!info->alias_of;
4916 }
4917
4918 entry = g_malloc0(sizeof(*entry));
4919 entry->value = info;
4920 entry->next = *cpu_list;
4921 *cpu_list = entry;
4922 }
4923
4924 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
4925 {
4926 CpuDefinitionInfoList *cpu_list = NULL;
4927 GSList *list = get_sorted_cpu_model_list();
4928 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
4929 g_slist_free(list);
4930 return cpu_list;
4931 }
4932
4933 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4934 bool migratable_only)
4935 {
4936 FeatureWordInfo *wi = &feature_word_info[w];
4937 uint64_t r = 0;
4938
4939 if (kvm_enabled()) {
4940 switch (wi->type) {
4941 case CPUID_FEATURE_WORD:
4942 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
4943 wi->cpuid.ecx,
4944 wi->cpuid.reg);
4945 break;
4946 case MSR_FEATURE_WORD:
4947 r = kvm_arch_get_supported_msr_feature(kvm_state,
4948 wi->msr.index);
4949 break;
4950 }
4951 } else if (hvf_enabled()) {
4952 if (wi->type != CPUID_FEATURE_WORD) {
4953 return 0;
4954 }
4955 r = hvf_get_supported_cpuid(wi->cpuid.eax,
4956 wi->cpuid.ecx,
4957 wi->cpuid.reg);
4958 } else if (tcg_enabled()) {
4959 r = wi->tcg_features;
4960 } else {
4961 return ~0;
4962 }
4963 if (migratable_only) {
4964 r &= x86_cpu_get_migratable_flags(w);
4965 }
4966 return r;
4967 }
4968
4969 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
4970 {
4971 PropValue *pv;
4972 for (pv = props; pv->prop; pv++) {
4973 if (!pv->value) {
4974 continue;
4975 }
4976 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
4977 &error_abort);
4978 }
4979 }
4980
4981 /* Apply properties for the CPU model version specified in model */
4982 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
4983 {
4984 const X86CPUVersionDefinition *vdef;
4985 X86CPUVersion version = x86_cpu_model_resolve_version(model);
4986
4987 if (version == CPU_VERSION_LEGACY) {
4988 return;
4989 }
4990
4991 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
4992 PropValue *p;
4993
4994 for (p = vdef->props; p && p->prop; p++) {
4995 object_property_parse(OBJECT(cpu), p->value, p->prop,
4996 &error_abort);
4997 }
4998
4999 if (vdef->version == version) {
5000 break;
5001 }
5002 }
5003
5004 /*
5005 * If we reached the end of the list, version number was invalid
5006 */
5007 assert(vdef->version == version);
5008 }
5009
5010 /* Load data from X86CPUDefinition into an X86CPU object
5011 */
5012 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
5013 {
5014 X86CPUDefinition *def = model->cpudef;
5015 CPUX86State *env = &cpu->env;
5016 const char *vendor;
5017 char host_vendor[CPUID_VENDOR_SZ + 1];
5018 FeatureWord w;
5019
5020 /*NOTE: any property set by this function should be returned by
5021 * x86_cpu_static_props(), so static expansion of
5022 * query-cpu-model-expansion is always complete.
5023 */
5024
5025 /* CPU models only set _minimum_ values for level/xlevel: */
5026 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
5027 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
5028
5029 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
5030 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
5031 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
5032 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
5033 for (w = 0; w < FEATURE_WORDS; w++) {
5034 env->features[w] = def->features[w];
5035 }
5036
5037 /* legacy-cache defaults to 'off' if CPU model provides cache info */
5038 cpu->legacy_cache = !def->cache_info;
5039
5040 /* Special cases not set in the X86CPUDefinition structs: */
5041 /* TODO: in-kernel irqchip for hvf */
5042 if (kvm_enabled()) {
5043 if (!kvm_irqchip_in_kernel()) {
5044 x86_cpu_change_kvm_default("x2apic", "off");
5045 }
5046
5047 x86_cpu_apply_props(cpu, kvm_default_props);
5048 } else if (tcg_enabled()) {
5049 x86_cpu_apply_props(cpu, tcg_default_props);
5050 }
5051
5052 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
5053
5054 /* sysenter isn't supported in compatibility mode on AMD,
5055 * syscall isn't supported in compatibility mode on Intel.
5056 * Normally we advertise the actual CPU vendor, but you can
5057 * override this using the 'vendor' property if you want to use
5058 * KVM's sysenter/syscall emulation in compatibility mode and
5059 * when doing cross vendor migration
5060 */
5061 vendor = def->vendor;
5062 if (accel_uses_host_cpuid()) {
5063 uint32_t ebx = 0, ecx = 0, edx = 0;
5064 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
5065 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
5066 vendor = host_vendor;
5067 }
5068
5069 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
5070
5071 x86_cpu_apply_version_props(cpu, model);
5072 }
5073
5074 #ifndef CONFIG_USER_ONLY
5075 /* Return a QDict containing keys for all properties that can be included
5076 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
5077 * must be included in the dictionary.
5078 */
5079 static QDict *x86_cpu_static_props(void)
5080 {
5081 FeatureWord w;
5082 int i;
5083 static const char *props[] = {
5084 "min-level",
5085 "min-xlevel",
5086 "family",
5087 "model",
5088 "stepping",
5089 "model-id",
5090 "vendor",
5091 "lmce",
5092 NULL,
5093 };
5094 static QDict *d;
5095
5096 if (d) {
5097 return d;
5098 }
5099
5100 d = qdict_new();
5101 for (i = 0; props[i]; i++) {
5102 qdict_put_null(d, props[i]);
5103 }
5104
5105 for (w = 0; w < FEATURE_WORDS; w++) {
5106 FeatureWordInfo *fi = &feature_word_info[w];
5107 int bit;
5108 for (bit = 0; bit < 64; bit++) {
5109 if (!fi->feat_names[bit]) {
5110 continue;
5111 }
5112 qdict_put_null(d, fi->feat_names[bit]);
5113 }
5114 }
5115
5116 return d;
5117 }
5118
5119 /* Add an entry to the @props dict with the current value of property @prop. */
5120 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
5121 {
5122 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
5123 &error_abort);
5124
5125 qdict_put_obj(props, prop, value);
5126 }
5127
5128 /* Convert CPU model data from X86CPU object to a property dictionary
5129 * that can recreate exactly the same CPU model.
5130 */
5131 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
5132 {
5133 QDict *sprops = x86_cpu_static_props();
5134 const QDictEntry *e;
5135
5136 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
5137 const char *prop = qdict_entry_key(e);
5138 x86_cpu_expand_prop(cpu, props, prop);
5139 }
5140 }
5141
5142 /* Convert CPU model data from X86CPU object to a property dictionary
5143 * that can recreate exactly the same CPU model, including every
5144 * writeable QOM property.
5145 */
5146 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
5147 {
5148 ObjectPropertyIterator iter;
5149 ObjectProperty *prop;
5150
5151 object_property_iter_init(&iter, OBJECT(cpu));
5152 while ((prop = object_property_iter_next(&iter))) {
5153 /* skip read-only or write-only properties */
5154 if (!prop->get || !prop->set) {
5155 continue;
5156 }
5157
5158 /* "hotplugged" is the only property that is configurable
5159 * on the command-line but will be set differently on CPUs
5160 * created using "-cpu ... -smp ..." and by CPUs created
5161 * on the fly by x86_cpu_from_model() for querying. Skip it.
5162 */
5163 if (!strcmp(prop->name, "hotplugged")) {
5164 continue;
5165 }
5166 x86_cpu_expand_prop(cpu, props, prop->name);
5167 }
5168 }
5169
5170 static void object_apply_props(Object *obj, QDict *props, Error **errp)
5171 {
5172 const QDictEntry *prop;
5173 Error *err = NULL;
5174
5175 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
5176 object_property_set_qobject(obj, qdict_entry_value(prop),
5177 qdict_entry_key(prop), &err);
5178 if (err) {
5179 break;
5180 }
5181 }
5182
5183 error_propagate(errp, err);
5184 }
5185
5186 /* Create X86CPU object according to model+props specification */
5187 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
5188 {
5189 X86CPU *xc = NULL;
5190 X86CPUClass *xcc;
5191 Error *err = NULL;
5192
5193 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
5194 if (xcc == NULL) {
5195 error_setg(&err, "CPU model '%s' not found", model);
5196 goto out;
5197 }
5198
5199 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
5200 if (props) {
5201 object_apply_props(OBJECT(xc), props, &err);
5202 if (err) {
5203 goto out;
5204 }
5205 }
5206
5207 x86_cpu_expand_features(xc, &err);
5208 if (err) {
5209 goto out;
5210 }
5211
5212 out:
5213 if (err) {
5214 error_propagate(errp, err);
5215 object_unref(OBJECT(xc));
5216 xc = NULL;
5217 }
5218 return xc;
5219 }
5220
5221 CpuModelExpansionInfo *
5222 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
5223 CpuModelInfo *model,
5224 Error **errp)
5225 {
5226 X86CPU *xc = NULL;
5227 Error *err = NULL;
5228 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
5229 QDict *props = NULL;
5230 const char *base_name;
5231
5232 xc = x86_cpu_from_model(model->name,
5233 model->has_props ?
5234 qobject_to(QDict, model->props) :
5235 NULL, &err);
5236 if (err) {
5237 goto out;
5238 }
5239
5240 props = qdict_new();
5241 ret->model = g_new0(CpuModelInfo, 1);
5242 ret->model->props = QOBJECT(props);
5243 ret->model->has_props = true;
5244
5245 switch (type) {
5246 case CPU_MODEL_EXPANSION_TYPE_STATIC:
5247 /* Static expansion will be based on "base" only */
5248 base_name = "base";
5249 x86_cpu_to_dict(xc, props);
5250 break;
5251 case CPU_MODEL_EXPANSION_TYPE_FULL:
5252 /* As we don't return every single property, full expansion needs
5253 * to keep the original model name+props, and add extra
5254 * properties on top of that.
5255 */
5256 base_name = model->name;
5257 x86_cpu_to_dict_full(xc, props);
5258 break;
5259 default:
5260 error_setg(&err, "Unsupported expansion type");
5261 goto out;
5262 }
5263
5264 x86_cpu_to_dict(xc, props);
5265
5266 ret->model->name = g_strdup(base_name);
5267
5268 out:
5269 object_unref(OBJECT(xc));
5270 if (err) {
5271 error_propagate(errp, err);
5272 qapi_free_CpuModelExpansionInfo(ret);
5273 ret = NULL;
5274 }
5275 return ret;
5276 }
5277 #endif /* !CONFIG_USER_ONLY */
5278
5279 static gchar *x86_gdb_arch_name(CPUState *cs)
5280 {
5281 #ifdef TARGET_X86_64
5282 return g_strdup("i386:x86-64");
5283 #else
5284 return g_strdup("i386");
5285 #endif
5286 }
5287
5288 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
5289 {
5290 X86CPUModel *model = data;
5291 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5292
5293 xcc->model = model;
5294 xcc->migration_safe = true;
5295 }
5296
5297 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
5298 {
5299 g_autofree char *typename = x86_cpu_type_name(name);
5300 TypeInfo ti = {
5301 .name = typename,
5302 .parent = TYPE_X86_CPU,
5303 .class_init = x86_cpu_cpudef_class_init,
5304 .class_data = model,
5305 };
5306
5307 type_register(&ti);
5308 }
5309
5310 static void x86_register_cpudef_types(X86CPUDefinition *def)
5311 {
5312 X86CPUModel *m;
5313 const X86CPUVersionDefinition *vdef;
5314
5315 /* AMD aliases are handled at runtime based on CPUID vendor, so
5316 * they shouldn't be set on the CPU model table.
5317 */
5318 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
5319 /* catch mistakes instead of silently truncating model_id when too long */
5320 assert(def->model_id && strlen(def->model_id) <= 48);
5321
5322 /* Unversioned model: */
5323 m = g_new0(X86CPUModel, 1);
5324 m->cpudef = def;
5325 m->version = CPU_VERSION_AUTO;
5326 m->is_alias = true;
5327 x86_register_cpu_model_type(def->name, m);
5328
5329 /* Versioned models: */
5330
5331 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
5332 X86CPUModel *m = g_new0(X86CPUModel, 1);
5333 g_autofree char *name =
5334 x86_cpu_versioned_model_name(def, vdef->version);
5335 m->cpudef = def;
5336 m->version = vdef->version;
5337 x86_register_cpu_model_type(name, m);
5338
5339 if (vdef->alias) {
5340 X86CPUModel *am = g_new0(X86CPUModel, 1);
5341 am->cpudef = def;
5342 am->version = vdef->version;
5343 am->is_alias = true;
5344 x86_register_cpu_model_type(vdef->alias, am);
5345 }
5346 }
5347
5348 }
5349
5350 #if !defined(CONFIG_USER_ONLY)
5351
5352 void cpu_clear_apic_feature(CPUX86State *env)
5353 {
5354 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
5355 }
5356
5357 #endif /* !CONFIG_USER_ONLY */
5358
5359 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
5360 uint32_t *eax, uint32_t *ebx,
5361 uint32_t *ecx, uint32_t *edx)
5362 {
5363 X86CPU *cpu = env_archcpu(env);
5364 CPUState *cs = env_cpu(env);
5365 uint32_t die_offset;
5366 uint32_t limit;
5367 uint32_t signature[3];
5368
5369 /* Calculate & apply limits for different index ranges */
5370 if (index >= 0xC0000000) {
5371 limit = env->cpuid_xlevel2;
5372 } else if (index >= 0x80000000) {
5373 limit = env->cpuid_xlevel;
5374 } else if (index >= 0x40000000) {
5375 limit = 0x40000001;
5376 } else {
5377 limit = env->cpuid_level;
5378 }
5379
5380 if (index > limit) {
5381 /* Intel documentation states that invalid EAX input will
5382 * return the same information as EAX=cpuid_level
5383 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
5384 */
5385 index = env->cpuid_level;
5386 }
5387
5388 switch(index) {
5389 case 0:
5390 *eax = env->cpuid_level;
5391 *ebx = env->cpuid_vendor1;
5392 *edx = env->cpuid_vendor2;
5393 *ecx = env->cpuid_vendor3;
5394 break;
5395 case 1:
5396 *eax = env->cpuid_version;
5397 *ebx = (cpu->apic_id << 24) |
5398 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
5399 *ecx = env->features[FEAT_1_ECX];
5400 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
5401 *ecx |= CPUID_EXT_OSXSAVE;
5402 }
5403 *edx = env->features[FEAT_1_EDX];
5404 if (cs->nr_cores * cs->nr_threads > 1) {
5405 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
5406 *edx |= CPUID_HT;
5407 }
5408 break;
5409 case 2:
5410 /* cache info: needed for Pentium Pro compatibility */
5411 if (cpu->cache_info_passthrough) {
5412 host_cpuid(index, 0, eax, ebx, ecx, edx);
5413 break;
5414 }
5415 *eax = 1; /* Number of CPUID[EAX=2] calls required */
5416 *ebx = 0;
5417 if (!cpu->enable_l3_cache) {
5418 *ecx = 0;
5419 } else {
5420 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
5421 }
5422 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
5423 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
5424 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
5425 break;
5426 case 4:
5427 /* cache info: needed for Core compatibility */
5428 if (cpu->cache_info_passthrough) {
5429 host_cpuid(index, count, eax, ebx, ecx, edx);
5430 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
5431 *eax &= ~0xFC000000;
5432 if ((*eax & 31) && cs->nr_cores > 1) {
5433 *eax |= (cs->nr_cores - 1) << 26;
5434 }
5435 } else {
5436 *eax = 0;
5437 switch (count) {
5438 case 0: /* L1 dcache info */
5439 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
5440 1, cs->nr_cores,
5441 eax, ebx, ecx, edx);
5442 break;
5443 case 1: /* L1 icache info */
5444 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
5445 1, cs->nr_cores,
5446 eax, ebx, ecx, edx);
5447 break;
5448 case 2: /* L2 cache info */
5449 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
5450 cs->nr_threads, cs->nr_cores,
5451 eax, ebx, ecx, edx);
5452 break;
5453 case 3: /* L3 cache info */
5454 die_offset = apicid_die_offset(env->nr_dies,
5455 cs->nr_cores, cs->nr_threads);
5456 if (cpu->enable_l3_cache) {
5457 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
5458 (1 << die_offset), cs->nr_cores,
5459 eax, ebx, ecx, edx);
5460 break;
5461 }
5462 /* fall through */
5463 default: /* end of info */
5464 *eax = *ebx = *ecx = *edx = 0;
5465 break;
5466 }
5467 }
5468 break;
5469 case 5:
5470 /* MONITOR/MWAIT Leaf */
5471 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
5472 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
5473 *ecx = cpu->mwait.ecx; /* flags */
5474 *edx = cpu->mwait.edx; /* mwait substates */
5475 break;
5476 case 6:
5477 /* Thermal and Power Leaf */
5478 *eax = env->features[FEAT_6_EAX];
5479 *ebx = 0;
5480 *ecx = 0;
5481 *edx = 0;
5482 break;
5483 case 7:
5484 /* Structured Extended Feature Flags Enumeration Leaf */
5485 if (count == 0) {
5486 /* Maximum ECX value for sub-leaves */
5487 *eax = env->cpuid_level_func7;
5488 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
5489 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
5490 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
5491 *ecx |= CPUID_7_0_ECX_OSPKE;
5492 }
5493 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
5494 } else if (count == 1) {
5495 *eax = env->features[FEAT_7_1_EAX];
5496 *ebx = 0;
5497 *ecx = 0;
5498 *edx = 0;
5499 } else {
5500 *eax = 0;
5501 *ebx = 0;
5502 *ecx = 0;
5503 *edx = 0;
5504 }
5505 break;
5506 case 9:
5507 /* Direct Cache Access Information Leaf */
5508 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
5509 *ebx = 0;
5510 *ecx = 0;
5511 *edx = 0;
5512 break;
5513 case 0xA:
5514 /* Architectural Performance Monitoring Leaf */
5515 if (kvm_enabled() && cpu->enable_pmu) {
5516 KVMState *s = cs->kvm_state;
5517
5518 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
5519 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
5520 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
5521 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
5522 } else if (hvf_enabled() && cpu->enable_pmu) {
5523 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
5524 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
5525 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
5526 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
5527 } else {
5528 *eax = 0;
5529 *ebx = 0;
5530 *ecx = 0;
5531 *edx = 0;
5532 }
5533 break;
5534 case 0xB:
5535 /* Extended Topology Enumeration Leaf */
5536 if (!cpu->enable_cpuid_0xb) {
5537 *eax = *ebx = *ecx = *edx = 0;
5538 break;
5539 }
5540
5541 *ecx = count & 0xff;
5542 *edx = cpu->apic_id;
5543
5544 switch (count) {
5545 case 0:
5546 *eax = apicid_core_offset(env->nr_dies,
5547 cs->nr_cores, cs->nr_threads);
5548 *ebx = cs->nr_threads;
5549 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5550 break;
5551 case 1:
5552 *eax = apicid_pkg_offset(env->nr_dies,
5553 cs->nr_cores, cs->nr_threads);
5554 *ebx = cs->nr_cores * cs->nr_threads;
5555 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5556 break;
5557 default:
5558 *eax = 0;
5559 *ebx = 0;
5560 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5561 }
5562
5563 assert(!(*eax & ~0x1f));
5564 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5565 break;
5566 case 0x1F:
5567 /* V2 Extended Topology Enumeration Leaf */
5568 if (env->nr_dies < 2) {
5569 *eax = *ebx = *ecx = *edx = 0;
5570 break;
5571 }
5572
5573 *ecx = count & 0xff;
5574 *edx = cpu->apic_id;
5575 switch (count) {
5576 case 0:
5577 *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
5578 cs->nr_threads);
5579 *ebx = cs->nr_threads;
5580 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5581 break;
5582 case 1:
5583 *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
5584 cs->nr_threads);
5585 *ebx = cs->nr_cores * cs->nr_threads;
5586 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5587 break;
5588 case 2:
5589 *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
5590 cs->nr_threads);
5591 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
5592 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
5593 break;
5594 default:
5595 *eax = 0;
5596 *ebx = 0;
5597 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5598 }
5599 assert(!(*eax & ~0x1f));
5600 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5601 break;
5602 case 0xD: {
5603 /* Processor Extended State */
5604 *eax = 0;
5605 *ebx = 0;
5606 *ecx = 0;
5607 *edx = 0;
5608 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5609 break;
5610 }
5611
5612 if (count == 0) {
5613 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
5614 *eax = env->features[FEAT_XSAVE_COMP_LO];
5615 *edx = env->features[FEAT_XSAVE_COMP_HI];
5616 /*
5617 * The initial value of xcr0 and ebx is 0. On hosts without kvm
5618 * commit 412a3c41 (e.g., CentOS 6), ebx stays 0
5619 * even though the guest updates xcr0, which crashes some legacy guests
5620 * (e.g., CentOS 6). So set ebx == ecx to work around it.
5621 */
5622 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
5623 } else if (count == 1) {
5624 *eax = env->features[FEAT_XSAVE];
5625 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
5626 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
5627 const ExtSaveArea *esa = &x86_ext_save_areas[count];
5628 *eax = esa->size;
5629 *ebx = esa->offset;
5630 }
5631 }
5632 break;
5633 }
5634 case 0x14: {
5635 /* Intel Processor Trace Enumeration */
5636 *eax = 0;
5637 *ebx = 0;
5638 *ecx = 0;
5639 *edx = 0;
5640 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
5641 !kvm_enabled()) {
5642 break;
5643 }
5644
5645 if (count == 0) {
5646 *eax = INTEL_PT_MAX_SUBLEAF;
5647 *ebx = INTEL_PT_MINIMAL_EBX;
5648 *ecx = INTEL_PT_MINIMAL_ECX;
5649 } else if (count == 1) {
5650 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
5651 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
5652 }
5653 break;
5654 }
5655 case 0x40000000:
5656 /*
5657 * CPUID code in kvm_arch_init_vcpu() ignores stuff
5658 * set here, but we restrict this to TCG nonetheless.
5659 */
5660 if (tcg_enabled() && cpu->expose_tcg) {
5661 memcpy(signature, "TCGTCGTCGTCG", 12);
5662 *eax = 0x40000001;
5663 *ebx = signature[0];
5664 *ecx = signature[1];
5665 *edx = signature[2];
5666 } else {
5667 *eax = 0;
5668 *ebx = 0;
5669 *ecx = 0;
5670 *edx = 0;
5671 }
5672 break;
5673 case 0x40000001:
5674 *eax = 0;
5675 *ebx = 0;
5676 *ecx = 0;
5677 *edx = 0;
5678 break;
5679 case 0x80000000:
5680 *eax = env->cpuid_xlevel;
5681 *ebx = env->cpuid_vendor1;
5682 *edx = env->cpuid_vendor2;
5683 *ecx = env->cpuid_vendor3;
5684 break;
5685 case 0x80000001:
5686 *eax = env->cpuid_version;
5687 *ebx = 0;
5688 *ecx = env->features[FEAT_8000_0001_ECX];
5689 *edx = env->features[FEAT_8000_0001_EDX];
5690
5691 /* The Linux kernel checks for the CMPLegacy bit and
5692 * discards multiple thread information if it is set.
5693 * So don't set it here for Intel to make Linux guests happy.
5694 */
5695 if (cs->nr_cores * cs->nr_threads > 1) {
5696 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
5697 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
5698 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
5699 *ecx |= 1 << 1; /* CmpLegacy bit */
5700 }
5701 }
5702 break;
5703 case 0x80000002:
5704 case 0x80000003:
5705 case 0x80000004:
5706 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
5707 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
5708 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
5709 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
5710 break;
5711 case 0x80000005:
5712 /* cache info (L1 cache) */
5713 if (cpu->cache_info_passthrough) {
5714 host_cpuid(index, 0, eax, ebx, ecx, edx);
5715 break;
5716 }
5717 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
5718 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
5719 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
5720 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
5721 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
5722 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
5723 break;
5724 case 0x80000006:
5725 /* cache info (L2 cache) */
5726 if (cpu->cache_info_passthrough) {
5727 host_cpuid(index, 0, eax, ebx, ecx, edx);
5728 break;
5729 }
5730 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
5731 (L2_DTLB_2M_ENTRIES << 16) | \
5732 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
5733 (L2_ITLB_2M_ENTRIES);
5734 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
5735 (L2_DTLB_4K_ENTRIES << 16) | \
5736 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
5737 (L2_ITLB_4K_ENTRIES);
5738 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
5739 cpu->enable_l3_cache ?
5740 env->cache_info_amd.l3_cache : NULL,
5741 ecx, edx);
5742 break;
5743 case 0x80000007:
5744 *eax = 0;
5745 *ebx = 0;
5746 *ecx = 0;
5747 *edx = env->features[FEAT_8000_0007_EDX];
5748 break;
5749 case 0x80000008:
5750 /* virtual & phys address size in low 2 bytes. */
5751 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5752 /* 64 bit processor */
5753 *eax = cpu->phys_bits; /* configurable physical bits */
5754 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
5755 *eax |= 0x00003900; /* 57 bits virtual */
5756 } else {
5757 *eax |= 0x00003000; /* 48 bits virtual */
5758 }
5759 } else {
5760 *eax = cpu->phys_bits;
5761 }
5762 *ebx = env->features[FEAT_8000_0008_EBX];
5763 *ecx = 0;
5764 *edx = 0;
5765 if (cs->nr_cores * cs->nr_threads > 1) {
5766 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
5767 }
5768 break;
5769 case 0x8000000A:
5770 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5771 *eax = 0x00000001; /* SVM Revision */
5772 *ebx = 0x00000010; /* nr of ASIDs */
5773 *ecx = 0;
5774 *edx = env->features[FEAT_SVM]; /* optional features */
5775 } else {
5776 *eax = 0;
5777 *ebx = 0;
5778 *ecx = 0;
5779 *edx = 0;
5780 }
5781 break;
5782 case 0x8000001D:
5783 *eax = 0;
5784 if (cpu->cache_info_passthrough) {
5785 host_cpuid(index, count, eax, ebx, ecx, edx);
5786 break;
5787 }
5788 switch (count) {
5789 case 0: /* L1 dcache info */
5790 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
5791 eax, ebx, ecx, edx);
5792 break;
5793 case 1: /* L1 icache info */
5794 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
5795 eax, ebx, ecx, edx);
5796 break;
5797 case 2: /* L2 cache info */
5798 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
5799 eax, ebx, ecx, edx);
5800 break;
5801 case 3: /* L3 cache info */
5802 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
5803 eax, ebx, ecx, edx);
5804 break;
5805 default: /* end of info */
5806 *eax = *ebx = *ecx = *edx = 0;
5807 break;
5808 }
5809 break;
5810 case 0x8000001E:
5811 assert(cpu->core_id <= 255);
5812 encode_topo_cpuid8000001e(cs, cpu,
5813 eax, ebx, ecx, edx);
5814 break;
5815 case 0xC0000000:
5816 *eax = env->cpuid_xlevel2;
5817 *ebx = 0;
5818 *ecx = 0;
5819 *edx = 0;
5820 break;
5821 case 0xC0000001:
5822 /* Support for VIA CPU's CPUID instruction */
5823 *eax = env->cpuid_version;
5824 *ebx = 0;
5825 *ecx = 0;
5826 *edx = env->features[FEAT_C000_0001_EDX];
5827 break;
5828 case 0xC0000002:
5829 case 0xC0000003:
5830 case 0xC0000004:
5831 /* Reserved for the future, and now filled with zero */
5832 *eax = 0;
5833 *ebx = 0;
5834 *ecx = 0;
5835 *edx = 0;
5836 break;
5837 case 0x8000001F:
5838 *eax = sev_enabled() ? 0x2 : 0;
5839 *ebx = sev_get_cbit_position();
5840 *ebx |= sev_get_reduced_phys_bits() << 6;
5841 *ecx = 0;
5842 *edx = 0;
5843 break;
5844 default:
5845 /* reserved values: zero */
5846 *eax = 0;
5847 *ebx = 0;
5848 *ecx = 0;
5849 *edx = 0;
5850 break;
5851 }
5852 }
5853
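/*
 * A minimal usage sketch: reading the guest vendor string back out of
 * CPUID leaf 0 via cpu_x86_cpuid(), mirroring x86_cpuid_get_vendor() above
 * (the vendor words come back in EBX, EDX, ECX order):
 *
 *   uint32_t eax, ebx, ecx, edx;
 *   char vendor[CPUID_VENDOR_SZ + 1];
 *
 *   cpu_x86_cpuid(env, 0, 0, &eax, &ebx, &ecx, &edx);
 *   x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 */
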
5854 /* CPUClass::reset() */
5855 static void x86_cpu_reset(CPUState *s)
5856 {
5857 X86CPU *cpu = X86_CPU(s);
5858 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
5859 CPUX86State *env = &cpu->env;
5860 target_ulong cr4;
5861 uint64_t xcr0;
5862 int i;
5863
5864 xcc->parent_reset(s);
5865
5866 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
5867
5868 env->old_exception = -1;
5869
5870 /* init to reset state */
5871
5872 env->hflags2 |= HF2_GIF_MASK;
5873
5874 cpu_x86_update_cr0(env, 0x60000010);
5875 env->a20_mask = ~0x0;
5876 env->smbase = 0x30000;
5877 env->msr_smi_count = 0;
5878
5879 env->idt.limit = 0xffff;
5880 env->gdt.limit = 0xffff;
5881 env->ldt.limit = 0xffff;
5882 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
5883 env->tr.limit = 0xffff;
5884 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
5885
5886 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
5887 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
5888 DESC_R_MASK | DESC_A_MASK);
5889 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
5890 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5891 DESC_A_MASK);
5892 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
5893 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5894 DESC_A_MASK);
5895 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
5896 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5897 DESC_A_MASK);
5898 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
5899 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5900 DESC_A_MASK);
5901 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
5902 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5903 DESC_A_MASK);
5904
5905 env->eip = 0xfff0;
5906 env->regs[R_EDX] = env->cpuid_version;
5907
5908 env->eflags = 0x2;
5909
5910 /* FPU init */
5911 for (i = 0; i < 8; i++) {
5912 env->fptags[i] = 1;
5913 }
5914 cpu_set_fpuc(env, 0x37f);
5915
5916 env->mxcsr = 0x1f80;
5917 /* All units are in INIT state. */
5918 env->xstate_bv = 0;
5919
5920 env->pat = 0x0007040600070406ULL;
5921 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
5922 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
5923 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
5924 }
5925
5926 memset(env->dr, 0, sizeof(env->dr));
5927 env->dr[6] = DR6_FIXED_1;
5928 env->dr[7] = DR7_FIXED_1;
5929 cpu_breakpoint_remove_all(s, BP_CPU);
5930 cpu_watchpoint_remove_all(s, BP_CPU);
5931
5932 cr4 = 0;
5933 xcr0 = XSTATE_FP_MASK;
5934
5935 #ifdef CONFIG_USER_ONLY
5936 /* Enable all the features for user-mode. */
5937 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
5938 xcr0 |= XSTATE_SSE_MASK;
5939 }
5940 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5941 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5942 if (env->features[esa->feature] & esa->bits) {
5943 xcr0 |= 1ull << i;
5944 }
5945 }
5946
5947 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
5948 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
5949 }
5950 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
5951 cr4 |= CR4_FSGSBASE_MASK;
5952 }
5953 #endif
5954
5955 env->xcr0 = xcr0;
5956 cpu_x86_update_cr4(env, cr4);
5957
5958 /*
5959 * SDM 11.11.5 requires:
5960 * - IA32_MTRR_DEF_TYPE MSR.E = 0
5961 * - IA32_MTRR_PHYSMASKn.V = 0
5962 * All other bits are undefined. For simplification, zero it all.
5963 */
5964 env->mtrr_deftype = 0;
5965 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
5966 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
5967
5968 env->interrupt_injected = -1;
5969 env->exception_nr = -1;
5970 env->exception_pending = 0;
5971 env->exception_injected = 0;
5972 env->exception_has_payload = false;
5973 env->exception_payload = 0;
5974 env->nmi_injected = false;
5975 #if !defined(CONFIG_USER_ONLY)
5976 /* We hard-wire the BSP to the first CPU. */
5977 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
5978
5979 s->halted = !cpu_is_bsp(cpu);
5980
5981 if (kvm_enabled()) {
5982 kvm_arch_reset_vcpu(cpu);
5983 }
5984 else if (hvf_enabled()) {
5985 hvf_reset_vcpu(s);
5986 }
5987 #endif
5988 }
5989
5990 #ifndef CONFIG_USER_ONLY
5991 bool cpu_is_bsp(X86CPU *cpu)
5992 {
5993 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
5994 }
5995
5996 /* TODO: remove me when reset over the QOM tree is implemented */
5997 static void x86_cpu_machine_reset_cb(void *opaque)
5998 {
5999 X86CPU *cpu = opaque;
6000 cpu_reset(CPU(cpu));
6001 }
6002 #endif
6003
6004 static void mce_init(X86CPU *cpu)
6005 {
6006 CPUX86State *cenv = &cpu->env;
6007 unsigned int bank;
6008
6009 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
6010 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
6011 (CPUID_MCE | CPUID_MCA)) {
6012 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
6013 (cpu->enable_lmce ? MCG_LMCE_P : 0);
6014 cenv->mcg_ctl = ~(uint64_t)0;
6015 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
6016 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
6017 }
6018 }
6019 }
6020
6021 #ifndef CONFIG_USER_ONLY
6022 APICCommonClass *apic_get_class(void)
6023 {
6024 const char *apic_type = "apic";
6025
6026 /* TODO: in-kernel irqchip for hvf */
6027 if (kvm_apic_in_kernel()) {
6028 apic_type = "kvm-apic";
6029 } else if (xen_enabled()) {
6030 apic_type = "xen-apic";
6031 }
6032
6033 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
6034 }
6035
6036 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
6037 {
6038 APICCommonState *apic;
6039 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
6040
6041 cpu->apic_state = DEVICE(object_new_with_class(apic_class));
6042
6043 object_property_add_child(OBJECT(cpu), "lapic",
6044 OBJECT(cpu->apic_state), &error_abort);
6045 object_unref(OBJECT(cpu->apic_state));
6046
6047 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
6048 /* TODO: convert to link<> */
6049 apic = APIC_COMMON(cpu->apic_state);
6050 apic->cpu = cpu;
6051 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
6052 }
6053
6054 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6055 {
6056 APICCommonState *apic;
6057 static bool apic_mmio_map_once;
6058
6059 if (cpu->apic_state == NULL) {
6060 return;
6061 }
6062 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
6063 errp);
6064
6065 /* Map APIC MMIO area */
6066 apic = APIC_COMMON(cpu->apic_state);
6067 if (!apic_mmio_map_once) {
6068 memory_region_add_subregion_overlap(get_system_memory(),
6069 apic->apicbase &
6070 MSR_IA32_APICBASE_BASE,
6071 &apic->io_memory,
6072 0x1000);
6073 apic_mmio_map_once = true;
6074 }
6075 }
6076
6077 static void x86_cpu_machine_done(Notifier *n, void *unused)
6078 {
6079 X86CPU *cpu = container_of(n, X86CPU, machine_done);
6080 MemoryRegion *smram =
6081 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
6082
6083 if (smram) {
6084 cpu->smram = g_new(MemoryRegion, 1);
6085 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
6086 smram, 0, 1ull << 32);
6087 memory_region_set_enabled(cpu->smram, true);
6088 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
6089 }
6090 }
6091 #else
6092 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6093 {
6094 }
6095 #endif
6096
6097 /* Note: Only safe for use on x86(-64) hosts */
6098 static uint32_t x86_host_phys_bits(void)
6099 {
6100 uint32_t eax;
6101 uint32_t host_phys_bits;
6102
6103 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
6104 if (eax >= 0x80000008) {
6105 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
6106 /* Note: According to AMD doc 25481 rev 2.34 they have a field
6107 * at 23:16 that can specify the maximum physical address bits for
6108 * the guest and override this value; but I've not seen
6109 * anything with that set.
6110 */
6111 host_phys_bits = eax & 0xff;
6112 } else {
6113 /* It's an odd 64 bit machine that doesn't have the leaf for
6114 * physical address bits; fall back to 36, which covers most older
6115 * Intel parts.
6116 */
6117 host_phys_bits = 36;
6118 }
6119
6120 return host_phys_bits;
6121 }
6122
6123 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
6124 {
6125 if (*min < value) {
6126 *min = value;
6127 }
6128 }
6129
6130 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
6131 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
6132 {
6133 CPUX86State *env = &cpu->env;
6134 FeatureWordInfo *fi = &feature_word_info[w];
6135 uint32_t eax = fi->cpuid.eax;
6136 uint32_t region = eax & 0xF0000000;
6137
6138 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
6139 if (!env->features[w]) {
6140 return;
6141 }
6142
6143 switch (region) {
6144 case 0x00000000:
6145 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
6146 break;
6147 case 0x80000000:
6148 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
6149 break;
6150 case 0xC0000000:
6151 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
6152 break;
6153 }
6154
6155 if (eax == 7) {
6156 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
6157 fi->cpuid.ecx);
6158 }
6159 }
6160
6161 /* Calculate XSAVE components based on the configured CPU feature flags */
6162 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
6163 {
6164 CPUX86State *env = &cpu->env;
6165 int i;
6166 uint64_t mask;
6167
6168 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
6169 return;
6170 }
6171
6172 mask = 0;
6173 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6174 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6175 if (env->features[esa->feature] & esa->bits) {
6176 mask |= (1ULL << i);
6177 }
6178 }
6179
6180 env->features[FEAT_XSAVE_COMP_LO] = mask;
6181 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
6182 }
6183
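/*
 * The mask built above follows the XCR0 state-component numbering
 * (assuming the usual layout of x86_ext_save_areas: bit 0 = x87,
 * bit 1 = SSE, bit 2 = AVX, bits 3-4 = MPX, bits 5-7 = AVX-512 state).
 * For example, a model with XSAVE and AVX enabled but no MPX/AVX-512 would
 * end up with FEAT_XSAVE_COMP_LO = 0x7 and FEAT_XSAVE_COMP_HI = 0.
 */
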
6184 /***** Steps involved in loading and filtering CPUID data
6185 *
6186 * When initializing and realizing a CPU object, the steps
6187 * involved in setting up CPUID data are:
6188 *
6189 * 1) Loading CPU model definition (X86CPUDefinition). This is
6190 * implemented by x86_cpu_load_model() and should be completely
6191 * transparent, as it is done automatically by instance_init.
6192 * No code should need to look at X86CPUDefinition structs
6193 * outside instance_init.
6194 *
6195 * 2) CPU expansion. This is done by realize before CPUID
6196 * filtering, and will make sure host/accelerator data is
6197 * loaded for CPU models that depend on host capabilities
6198 * (e.g. "host"). Done by x86_cpu_expand_features().
6199 *
6200 * 3) CPUID filtering. This initializes extra data related to
6201 * CPUID, and checks if the host supports all capabilities
6202 * required by the CPU. Runnability of a CPU model is
6203 * determined at this step. Done by x86_cpu_filter_features().
6204 *
6205 * Some operations don't require all steps to be performed.
6206 * More precisely:
6207 *
6208 * - CPU instance creation (instance_init) will run only CPU
6209 * model loading. CPU expansion can't run at instance_init-time
6210 * because host/accelerator data may not be available yet.
6211 * - CPU realization will perform both CPU model expansion and CPUID
6212 * filtering, and return an error in case one of them fails.
6213 * - query-cpu-definitions needs to run all 3 steps. It needs
6214 * to run CPUID filtering, as the 'unavailable-features'
6215 * field is set based on the filtering results.
6216 * - The query-cpu-model-expansion QMP command only needs to run
6217 * CPU model loading and CPU expansion. It should not filter
6218 * any CPUID data based on host capabilities.
6219 */
6220
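/*
 * In terms of the functions in this file, the realize-time part of the
 * sequence above reduces to roughly (see x86_cpu_realizefn() below):
 *
 *   x86_cpu_expand_features(cpu, &local_err);                              // step 2
 *   x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);  // step 3
 *
 * with step 1, x86_cpu_load_model(), having already run from instance_init
 * when the CPU object was created.
 */
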
6221 /* Expand CPU configuration data, based on configured features
6222 * and host/accelerator capabilities when appropriate.
6223 */
6224 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
6225 {
6226 CPUX86State *env = &cpu->env;
6227 FeatureWord w;
6228 int i;
6229 GList *l;
6230 Error *local_err = NULL;
6231
6232 for (l = plus_features; l; l = l->next) {
6233 const char *prop = l->data;
6234 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
6235 if (local_err) {
6236 goto out;
6237 }
6238 }
6239
6240 for (l = minus_features; l; l = l->next) {
6241 const char *prop = l->data;
6242 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
6243 if (local_err) {
6244 goto out;
6245 }
6246 }
6247
6248 /*TODO: Now cpu->max_features doesn't overwrite features
6249 * set using QOM properties, and we can convert
6250 * plus_features & minus_features to global properties
6251 * inside x86_cpu_parse_featurestr() too.
6252 */
6253 if (cpu->max_features) {
6254 for (w = 0; w < FEATURE_WORDS; w++) {
6255 /* Override only features that weren't set explicitly
6256 * by the user.
6257 */
6258 env->features[w] |=
6259 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
6260 ~env->user_features[w] & \
6261 ~feature_word_info[w].no_autoenable_flags;
6262 }
6263 }
6264
6265 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
6266 FeatureDep *d = &feature_dependencies[i];
6267 if (!(env->features[d->from.index] & d->from.mask)) {
6268 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
6269
6270 /* Not an error unless the dependent feature was added explicitly. */
6271 mark_unavailable_features(cpu, d->to.index,
6272 unavailable_features & env->user_features[d->to.index],
6273 "This feature depends on other features that were not requested");
6274
6275 env->user_features[d->to.index] |= unavailable_features;
6276 env->features[d->to.index] &= ~unavailable_features;
6277 }
6278 }
6279
6280 if (!kvm_enabled() || !cpu->expose_kvm) {
6281 env->features[FEAT_KVM] = 0;
6282 }
6283
6284 x86_cpu_enable_xsave_components(cpu);
6285
6286 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
6287 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
6288 if (cpu->full_cpuid_auto_level) {
6289 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
6290 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
6291 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
6292 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
6293 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
6294 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
6295 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
6296 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
6297 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
6298 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
6299 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
6300 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
6301
6302 /* Intel Processor Trace requires CPUID[0x14] */
6303 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6304 kvm_enabled() && cpu->intel_pt_auto_level) {
6305 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
6306 }
6307
6308 /* CPU topology with multi-dies support requires CPUID[0x1F] */
6309 if (env->nr_dies > 1) {
6310 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
6311 }
6312
6313 /* SVM requires CPUID[0x8000000A] */
6314 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
6315 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
6316 }
6317
6318 /* SEV requires CPUID[0x8000001F] */
6319 if (sev_enabled()) {
6320 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
6321 }
6322 }
6323
6324 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
6325 if (env->cpuid_level_func7 == UINT32_MAX) {
6326 env->cpuid_level_func7 = env->cpuid_min_level_func7;
6327 }
6328 if (env->cpuid_level == UINT32_MAX) {
6329 env->cpuid_level = env->cpuid_min_level;
6330 }
6331 if (env->cpuid_xlevel == UINT32_MAX) {
6332 env->cpuid_xlevel = env->cpuid_min_xlevel;
6333 }
6334 if (env->cpuid_xlevel2 == UINT32_MAX) {
6335 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
6336 }
6337
6338 out:
6339 if (local_err != NULL) {
6340 error_propagate(errp, local_err);
6341 }
6342 }
6343
6344 /*
6345 * Finishes initialization of CPUID data, filters CPU feature
6346 * words based on host availability of each feature.
6347 *
6348 * Features not supported by the host are recorded in cpu->filtered_features.
6349 */
6350 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
6351 {
6352 CPUX86State *env = &cpu->env;
6353 FeatureWord w;
6354 const char *prefix = NULL;
6355
6356 if (verbose) {
6357 prefix = accel_uses_host_cpuid()
6358 ? "host doesn't support requested feature"
6359 : "TCG doesn't support requested feature";
6360 }
6361
6362 for (w = 0; w < FEATURE_WORDS; w++) {
6363 uint64_t host_feat =
6364 x86_cpu_get_supported_feature_word(w, false);
6365 uint64_t requested_features = env->features[w];
6366 uint64_t unavailable_features = requested_features & ~host_feat;
6367 mark_unavailable_features(cpu, w, unavailable_features, prefix);
6368 }
6369
6370 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6371 kvm_enabled()) {
6372 KVMState *s = CPU(cpu)->kvm_state;
6373 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
6374 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
6375 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
6376 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
6377 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
6378
6379 if (!eax_0 ||
6380 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
6381 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
6382 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
6383 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
6384 INTEL_PT_ADDR_RANGES_NUM) ||
6385 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
6386 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
6387 (ecx_0 & INTEL_PT_IP_LIP)) {
6388 /*
6389 * Processor Trace capabilities aren't configurable, so if the
6390 * host can't emulate the capabilities we report in
6391 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
6392 */
6393 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
6394 }
6395 }
6396 }
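/*
 * Illustrative note (editor's addition): the filtering above is what backs
 * the "check" and "enforce" CPU properties.  For example,
 *
 *   qemu-system-x86_64 -enable-kvm -cpu Haswell,enforce
 *
 * refuses to start if the host cannot provide every Haswell feature, while
 * plain "check" (enabled by default) only warns about the filtered bits.
 */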
6397
6398 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
6399 {
6400 CPUState *cs = CPU(dev);
6401 X86CPU *cpu = X86_CPU(dev);
6402 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6403 CPUX86State *env = &cpu->env;
6404 Error *local_err = NULL;
6405 static bool ht_warned;
6406
6407 if (xcc->host_cpuid_required) {
6408 if (!accel_uses_host_cpuid()) {
6409 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6410 error_setg(&local_err, "CPU model '%s' requires KVM", name);
6411 goto out;
6412 }
6413
6414 if (enable_cpu_pm) {
6415 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
6416 &cpu->mwait.ecx, &cpu->mwait.edx);
6417 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
6418 }
6419 if (kvm_enabled() && cpu->ucode_rev == 0) {
6420 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
6421 MSR_IA32_UCODE_REV);
6422 }
6423 }
6424
6425 if (cpu->ucode_rev == 0) {
6426 /* The default is the same as KVM's. */
6427 if (IS_AMD_CPU(env)) {
6428 cpu->ucode_rev = 0x01000065;
6429 } else {
6430 cpu->ucode_rev = 0x100000000ULL;
6431 }
6432 }
6433
6434 /* mwait extended info: needed for Core compatibility */
6435 /* We always wake on interrupt even if the host does not have the capability */
6436 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
6437
6438 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
6439 error_setg(errp, "apic-id property was not initialized properly");
6440 return;
6441 }
6442
6443 x86_cpu_expand_features(cpu, &local_err);
6444 if (local_err) {
6445 goto out;
6446 }
6447
6448 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
6449
6450 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
6451 error_setg(&local_err,
6452 accel_uses_host_cpuid() ?
6453 "Host doesn't support requested features" :
6454 "TCG doesn't support requested features");
6455 goto out;
6456 }
6457
6458 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
6459 * CPUID[1].EDX.
6460 */
6461 if (IS_AMD_CPU(env)) {
6462 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
6463 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
6464 & CPUID_EXT2_AMD_ALIASES);
6465 }
6466
6467 /* For 64-bit guests, think about the number of physical bits to present.
6468 * Ideally this should be the same as the host; anything other than matching
6469 * the host can cause incorrect guest behaviour.
6470 * QEMU used to pick the magic value of 40 bits, which corresponds to
6471 * consumer AMD devices but nothing else.
6472 */
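/*
 * Illustrative example (editor's addition): the behaviour below can be
 * driven from the command line with the "phys-bits" and "host-phys-bits"
 * properties defined later in this file, e.g.
 *
 *   qemu-system-x86_64 -enable-kvm -cpu host,host-phys-bits=on
 *   qemu-system-x86_64 -enable-kvm -cpu EPYC,phys-bits=40
 *
 * The first form copies the host value; the second pins an explicit width
 * (and triggers the mismatch warning below if it differs from the host).
 */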
6473 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
6474 if (accel_uses_host_cpuid()) {
6475 uint32_t host_phys_bits = x86_host_phys_bits();
6476 static bool warned;
6477
6478 /* Print a warning if the user set it to a value that's not the
6479 * host value.
6480 */
6481 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
6482 !warned) {
6483 warn_report("Host physical bits (%u)"
6484 " does not match phys-bits property (%u)",
6485 host_phys_bits, cpu->phys_bits);
6486 warned = true;
6487 }
6488
6489 if (cpu->host_phys_bits) {
6490 /* The user asked for us to use the host physical bits */
6491 cpu->phys_bits = host_phys_bits;
6492 if (cpu->host_phys_bits_limit &&
6493 cpu->phys_bits > cpu->host_phys_bits_limit) {
6494 cpu->phys_bits = cpu->host_phys_bits_limit;
6495 }
6496 }
6497
6498 if (cpu->phys_bits &&
6499 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
6500 cpu->phys_bits < 32)) {
6501 error_setg(errp, "phys-bits should be between 32 and %u "
6502 " (but is %u)",
6503 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
6504 return;
6505 }
6506 } else {
6507 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
6508 error_setg(errp, "TCG only supports phys-bits=%u",
6509 TCG_PHYS_ADDR_BITS);
6510 return;
6511 }
6512 }
6513 /* 0 means it was not explicitly set by the user (or by machine
6514 * compat_props or by the host code above). In this case, the default
6515 * is the value used by TCG (40).
6516 */
6517 if (cpu->phys_bits == 0) {
6518 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
6519 }
6520 } else {
6521 /* For 32-bit systems, don't use the user-set value, but keep
6522 * phys_bits consistent with what we tell the guest.
6523 */
6524 if (cpu->phys_bits != 0) {
6525 error_setg(errp, "phys-bits is not user-configurable for 32-bit CPUs");
6526 return;
6527 }
6528
6529 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
6530 cpu->phys_bits = 36;
6531 } else {
6532 cpu->phys_bits = 32;
6533 }
6534 }
6535
6536 /* Cache information initialization */
6537 if (!cpu->legacy_cache) {
6538 if (!xcc->model || !xcc->model->cpudef->cache_info) {
6539 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6540 error_setg(errp,
6541 "CPU model '%s' doesn't support legacy-cache=off", name);
6542 return;
6543 }
6544 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
6545 *xcc->model->cpudef->cache_info;
6546 } else {
6547 /* Build legacy cache information */
6548 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
6549 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
6550 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
6551 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
6552
6553 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
6554 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
6555 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
6556 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
6557
6558 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
6559 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
6560 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
6561 env->cache_info_amd.l3_cache = &legacy_l3_cache;
6562 }
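/*
 * Illustrative example (editor's addition): CPU models that ship their own
 * cache description can expose it with legacy-cache=off, e.g.
 *
 *   qemu-system-x86_64 -cpu EPYC,legacy-cache=off
 *
 * whereas models without cache_info (and older machine types) keep the
 * legacy tables built in the else branch above.
 */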
6563
6564
6565 cpu_exec_realizefn(cs, &local_err);
6566 if (local_err != NULL) {
6567 error_propagate(errp, local_err);
6568 return;
6569 }
6570
6571 #ifndef CONFIG_USER_ONLY
6572 MachineState *ms = MACHINE(qdev_get_machine());
6573 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
6574
6575 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
6576 x86_cpu_apic_create(cpu, &local_err);
6577 if (local_err != NULL) {
6578 goto out;
6579 }
6580 }
6581 #endif
6582
6583 mce_init(cpu);
6584
6585 #ifndef CONFIG_USER_ONLY
6586 if (tcg_enabled()) {
6587 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
6588 cpu->cpu_as_root = g_new(MemoryRegion, 1);
6589
6590 /* Outer container... */
6591 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
6592 memory_region_set_enabled(cpu->cpu_as_root, true);
6593
6594 /* ... with two regions inside: normal system memory with low
6595 * priority, and...
6596 */
6597 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
6598 get_system_memory(), 0, ~0ull);
6599 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
6600 memory_region_set_enabled(cpu->cpu_as_mem, true);
6601
6602 cs->num_ases = 2;
6603 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
6604 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
6605
6606 /* ... SMRAM with higher priority, linked from /machine/smram. */
6607 cpu->machine_done.notify = x86_cpu_machine_done;
6608 qemu_add_machine_init_done_notifier(&cpu->machine_done);
6609 }
6610 #endif
6611
6612 qemu_init_vcpu(cs);
6613
6614 /*
6615 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
6616 * adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the requested
6617 * topology (sockets, cores, threads), it is still better to give users a
6618 * warning when the guest CPU model cannot support that topology.
6619 *
6620 * NOTE: the following code has to run after qemu_init_vcpu(). Otherwise
6621 * cs->nr_threads hasn't been populated yet and the check is incorrect.
6622 */
6623 if (IS_AMD_CPU(env) &&
6624 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
6625 cs->nr_threads > 1 && !ht_warned) {
6626 warn_report("This family of AMD CPU doesn't support "
6627 "hyperthreading(%d)",
6628 cs->nr_threads);
6629 error_printf("Please configure -smp options properly"
6630 " or try enabling topoext feature.\n");
6631 ht_warned = true;
6632 }
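/*
 * Illustrative example (editor's addition): the warning above is avoided
 * either by exposing topoext on an AMD-based guest, e.g.
 *
 *   qemu-system-x86_64 -cpu EPYC,topoext=on -smp 8,sockets=1,cores=4,threads=2
 *
 * or by choosing a topology with threads=1 for models that lack it.
 */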
6633
6634 x86_cpu_apic_realize(cpu, &local_err);
6635 if (local_err != NULL) {
6636 goto out;
6637 }
6638 cpu_reset(cs);
6639
6640 xcc->parent_realize(dev, &local_err);
6641
6642 out:
6643 if (local_err != NULL) {
6644 error_propagate(errp, local_err);
6645 return;
6646 }
6647 }
6648
6649 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
6650 {
6651 X86CPU *cpu = X86_CPU(dev);
6652 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6653 Error *local_err = NULL;
6654
6655 #ifndef CONFIG_USER_ONLY
6656 cpu_remove_sync(CPU(dev));
6657 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
6658 #endif
6659
6660 if (cpu->apic_state) {
6661 object_unparent(OBJECT(cpu->apic_state));
6662 cpu->apic_state = NULL;
6663 }
6664
6665 xcc->parent_unrealize(dev, &local_err);
6666 if (local_err != NULL) {
6667 error_propagate(errp, local_err);
6668 return;
6669 }
6670 }
6671
6672 typedef struct BitProperty {
6673 FeatureWord w;
6674 uint64_t mask;
6675 } BitProperty;
6676
6677 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
6678 void *opaque, Error **errp)
6679 {
6680 X86CPU *cpu = X86_CPU(obj);
6681 BitProperty *fp = opaque;
6682 uint64_t f = cpu->env.features[fp->w];
6683 bool value = (f & fp->mask) == fp->mask;
6684 visit_type_bool(v, name, &value, errp);
6685 }
6686
6687 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
6688 void *opaque, Error **errp)
6689 {
6690 DeviceState *dev = DEVICE(obj);
6691 X86CPU *cpu = X86_CPU(obj);
6692 BitProperty *fp = opaque;
6693 Error *local_err = NULL;
6694 bool value;
6695
6696 if (dev->realized) {
6697 qdev_prop_set_after_realize(dev, name, errp);
6698 return;
6699 }
6700
6701 visit_type_bool(v, name, &value, &local_err);
6702 if (local_err) {
6703 error_propagate(errp, local_err);
6704 return;
6705 }
6706
6707 if (value) {
6708 cpu->env.features[fp->w] |= fp->mask;
6709 } else {
6710 cpu->env.features[fp->w] &= ~fp->mask;
6711 }
6712 cpu->env.user_features[fp->w] |= fp->mask;
6713 }
6714
6715 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
6716 void *opaque)
6717 {
6718 BitProperty *prop = opaque;
6719 g_free(prop);
6720 }
6721
6722 /* Register a boolean property to get/set a single bit in a uint64_t field.
6723 *
6724 * The same property name can be registered multiple times to make it affect
6725 * multiple bits in the same FeatureWord. In that case, the getter will return
6726 * true only if all bits are set.
6727 */
6728 static void x86_cpu_register_bit_prop(X86CPU *cpu,
6729 const char *prop_name,
6730 FeatureWord w,
6731 int bitnr)
6732 {
6733 BitProperty *fp;
6734 ObjectProperty *op;
6735 uint64_t mask = (1ULL << bitnr);
6736
6737 op = object_property_find(OBJECT(cpu), prop_name, NULL);
6738 if (op) {
6739 fp = op->opaque;
6740 assert(fp->w == w);
6741 fp->mask |= mask;
6742 } else {
6743 fp = g_new0(BitProperty, 1);
6744 fp->w = w;
6745 fp->mask = mask;
6746 object_property_add(OBJECT(cpu), prop_name, "bool",
6747 x86_cpu_get_bit_prop,
6748 x86_cpu_set_bit_prop,
6749 x86_cpu_release_bit_prop, fp, &error_abort);
6750 }
6751 }
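/*
 * Illustrative example (editor's addition): each named feature bit
 * registered here becomes a boolean QOM/-cpu property, so single bits can
 * be toggled from the command line, e.g.
 *
 *   qemu-system-x86_64 -cpu qemu64,sse4.2=on,vme=off
 *
 * Setting a property also marks the bit in env->user_features, which the
 * dependency/filtering code uses to decide whether removing a feature
 * deserves a warning.
 */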
6752
6753 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
6754 FeatureWord w,
6755 int bitnr)
6756 {
6757 FeatureWordInfo *fi = &feature_word_info[w];
6758 const char *name = fi->feat_names[bitnr];
6759
6760 if (!name) {
6761 return;
6762 }
6763
6764 /* Property names should use "-" instead of "_".
6765 * Old names containing underscores are registered as aliases
6766 * using object_property_add_alias()
6767 */
6768 assert(!strchr(name, '_'));
6769 /* aliases don't use "|" delimiters anymore; they are registered
6770 * manually using object_property_add_alias() */
6771 assert(!strchr(name, '|'));
6772 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
6773 }
6774
6775 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
6776 {
6777 X86CPU *cpu = X86_CPU(cs);
6778 CPUX86State *env = &cpu->env;
6779 GuestPanicInformation *panic_info = NULL;
6780
6781 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
6782 panic_info = g_malloc0(sizeof(GuestPanicInformation));
6783
6784 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
6785
6786 assert(HV_CRASH_PARAMS >= 5);
6787 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
6788 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
6789 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
6790 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
6791 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
6792 }
6793
6794 return panic_info;
6795 }
6796 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
6797 const char *name, void *opaque,
6798 Error **errp)
6799 {
6800 CPUState *cs = CPU(obj);
6801 GuestPanicInformation *panic_info;
6802
6803 if (!cs->crash_occurred) {
6804 error_setg(errp, "No crash occured");
6805 return;
6806 }
6807
6808 panic_info = x86_cpu_get_crash_info(cs);
6809 if (panic_info == NULL) {
6810 error_setg(errp, "No crash information");
6811 return;
6812 }
6813
6814 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
6815 errp);
6816 qapi_free_GuestPanicInformation(panic_info);
6817 }
6818
6819 static void x86_cpu_initfn(Object *obj)
6820 {
6821 X86CPU *cpu = X86_CPU(obj);
6822 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
6823 CPUX86State *env = &cpu->env;
6824 FeatureWord w;
6825
6826 env->nr_dies = 1;
6827 cpu_set_cpustate_pointers(cpu);
6828
6829 object_property_add(obj, "family", "int",
6830 x86_cpuid_version_get_family,
6831 x86_cpuid_version_set_family, NULL, NULL, NULL);
6832 object_property_add(obj, "model", "int",
6833 x86_cpuid_version_get_model,
6834 x86_cpuid_version_set_model, NULL, NULL, NULL);
6835 object_property_add(obj, "stepping", "int",
6836 x86_cpuid_version_get_stepping,
6837 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
6838 object_property_add_str(obj, "vendor",
6839 x86_cpuid_get_vendor,
6840 x86_cpuid_set_vendor, NULL);
6841 object_property_add_str(obj, "model-id",
6842 x86_cpuid_get_model_id,
6843 x86_cpuid_set_model_id, NULL);
6844 object_property_add(obj, "tsc-frequency", "int",
6845 x86_cpuid_get_tsc_freq,
6846 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
6847 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
6848 x86_cpu_get_feature_words,
6849 NULL, NULL, (void *)env->features, NULL);
6850 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
6851 x86_cpu_get_feature_words,
6852 NULL, NULL, (void *)cpu->filtered_features, NULL);
6853 /*
6854 * The "unavailable-features" property has the same semantics as
6855 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
6856 * QMP command: they list the features that would have prevented the
6857 * CPU from running if the "enforce" flag was set.
6858 */
6859 object_property_add(obj, "unavailable-features", "strList",
6860 x86_cpu_get_unavailable_features,
6861 NULL, NULL, NULL, &error_abort);
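/*
 * Illustrative example (editor's addition): via QMP the same information is
 * visible in query-cpu-definitions output, along the lines of
 *
 *   { "execute": "query-cpu-definitions" }
 *   -> { "return": [ { "name": "Haswell",
 *                      "unavailable-features": [ "hle", "rtm" ], ... } ] }
 *
 * (the feature names shown are only an example of what a given host might
 * report as missing).
 */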
6862
6863 object_property_add(obj, "crash-information", "GuestPanicInformation",
6864 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
6865
6866 for (w = 0; w < FEATURE_WORDS; w++) {
6867 int bitnr;
6868
6869 for (bitnr = 0; bitnr < 64; bitnr++) {
6870 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
6871 }
6872 }
6873
6874 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
6875 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
6876 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
6877 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
6878 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
6879 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
6880 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
6881
6882 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
6883 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
6884 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
6885 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
6886 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
6887 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
6888 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
6889 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
6890 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
6891 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
6892 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
6893 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
6894 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
6895 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
6896 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
6897 &error_abort);
6898 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
6899 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
6900 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
6901 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
6902 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
6903 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
6904 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
6905
6906 if (xcc->model) {
6907 x86_cpu_load_model(cpu, xcc->model, &error_abort);
6908 }
6909 }
6910
6911 static int64_t x86_cpu_get_arch_id(CPUState *cs)
6912 {
6913 X86CPU *cpu = X86_CPU(cs);
6914
6915 return cpu->apic_id;
6916 }
6917
6918 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
6919 {
6920 X86CPU *cpu = X86_CPU(cs);
6921
6922 return cpu->env.cr[0] & CR0_PG_MASK;
6923 }
6924
6925 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
6926 {
6927 X86CPU *cpu = X86_CPU(cs);
6928
6929 cpu->env.eip = value;
6930 }
6931
6932 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
6933 {
6934 X86CPU *cpu = X86_CPU(cs);
6935
6936 cpu->env.eip = tb->pc - tb->cs_base;
6937 }
6938
6939 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
6940 {
6941 X86CPU *cpu = X86_CPU(cs);
6942 CPUX86State *env = &cpu->env;
6943
6944 #if !defined(CONFIG_USER_ONLY)
6945 if (interrupt_request & CPU_INTERRUPT_POLL) {
6946 return CPU_INTERRUPT_POLL;
6947 }
6948 #endif
6949 if (interrupt_request & CPU_INTERRUPT_SIPI) {
6950 return CPU_INTERRUPT_SIPI;
6951 }
6952
6953 if (env->hflags2 & HF2_GIF_MASK) {
6954 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
6955 !(env->hflags & HF_SMM_MASK)) {
6956 return CPU_INTERRUPT_SMI;
6957 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
6958 !(env->hflags2 & HF2_NMI_MASK)) {
6959 return CPU_INTERRUPT_NMI;
6960 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
6961 return CPU_INTERRUPT_MCE;
6962 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
6963 (((env->hflags2 & HF2_VINTR_MASK) &&
6964 (env->hflags2 & HF2_HIF_MASK)) ||
6965 (!(env->hflags2 & HF2_VINTR_MASK) &&
6966 (env->eflags & IF_MASK &&
6967 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
6968 return CPU_INTERRUPT_HARD;
6969 #if !defined(CONFIG_USER_ONLY)
6970 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
6971 (env->eflags & IF_MASK) &&
6972 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
6973 return CPU_INTERRUPT_VIRQ;
6974 #endif
6975 }
6976 }
6977
6978 return 0;
6979 }
6980
6981 static bool x86_cpu_has_work(CPUState *cs)
6982 {
6983 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
6984 }
6985
6986 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
6987 {
6988 X86CPU *cpu = X86_CPU(cs);
6989 CPUX86State *env = &cpu->env;
6990
6991 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
6992 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
6993 : bfd_mach_i386_i8086);
6994 info->print_insn = print_insn_i386;
6995
6996 info->cap_arch = CS_ARCH_X86;
6997 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
6998 : env->hflags & HF_CS32_MASK ? CS_MODE_32
6999 : CS_MODE_16);
7000 info->cap_insn_unit = 1;
7001 info->cap_insn_split = 8;
7002 }
7003
7004 void x86_update_hflags(CPUX86State *env)
7005 {
7006 uint32_t hflags;
7007 #define HFLAG_COPY_MASK \
7008 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
7009 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
7010 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
7011 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
7012
7013 hflags = env->hflags & HFLAG_COPY_MASK;
7014 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
7015 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
7016 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
7017 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
7018 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
7019
7020 if (env->cr[4] & CR4_OSFXSR_MASK) {
7021 hflags |= HF_OSFXSR_MASK;
7022 }
7023
7024 if (env->efer & MSR_EFER_LMA) {
7025 hflags |= HF_LMA_MASK;
7026 }
7027
7028 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
7029 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
7030 } else {
7031 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
7032 (DESC_B_SHIFT - HF_CS32_SHIFT);
7033 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
7034 (DESC_B_SHIFT - HF_SS32_SHIFT);
7035 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
7036 !(hflags & HF_CS32_MASK)) {
7037 hflags |= HF_ADDSEG_MASK;
7038 } else {
7039 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
7040 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
7041 }
7042 }
7043 env->hflags = hflags;
7044 }
7045
7046 static Property x86_cpu_properties[] = {
7047 #ifdef CONFIG_USER_ONLY
7048 /* apic_id = 0 by default for *-user, see commit 9886e834 */
7049 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
7050 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
7051 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
7052 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
7053 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
7054 #else
7055 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
7056 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
7057 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
7058 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
7059 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
7060 #endif
7061 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
7062 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
7063
7064 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
7065 HYPERV_SPINLOCK_NEVER_RETRY),
7066 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
7067 HYPERV_FEAT_RELAXED, 0),
7068 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
7069 HYPERV_FEAT_VAPIC, 0),
7070 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
7071 HYPERV_FEAT_TIME, 0),
7072 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
7073 HYPERV_FEAT_CRASH, 0),
7074 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
7075 HYPERV_FEAT_RESET, 0),
7076 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
7077 HYPERV_FEAT_VPINDEX, 0),
7078 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
7079 HYPERV_FEAT_RUNTIME, 0),
7080 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
7081 HYPERV_FEAT_SYNIC, 0),
7082 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
7083 HYPERV_FEAT_STIMER, 0),
7084 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
7085 HYPERV_FEAT_FREQUENCIES, 0),
7086 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
7087 HYPERV_FEAT_REENLIGHTENMENT, 0),
7088 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
7089 HYPERV_FEAT_TLBFLUSH, 0),
7090 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
7091 HYPERV_FEAT_EVMCS, 0),
7092 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
7093 HYPERV_FEAT_IPI, 0),
7094 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
7095 HYPERV_FEAT_STIMER_DIRECT, 0),
7096 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
7097 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
7098 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
7099
7100 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
7101 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
7102 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
7103 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
7104 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
7105 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
7106 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
7107 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
7108 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
7109 UINT32_MAX),
7110 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
7111 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
7112 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
7113 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
7114 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
7115 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
7116 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
7117 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
7118 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
7119 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
7120 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
7121 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
7122 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
7123 false),
7124 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
7125 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
7126 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
7127 true),
7128 /*
7129 * legacy_cache defaults to true unless the CPU model provides its
7130 * own cache information (see x86_cpu_load_model()).
7131 */
7132 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
7133
7134 /*
7135 * From "Requirements for Implementing the Microsoft
7136 * Hypervisor Interface":
7137 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
7138 *
7139 * "Starting with Windows Server 2012 and Windows 8, if
7140 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
7141 * the hypervisor imposes no specific limit to the number of VPs.
7142 * In this case, Windows Server 2012 guest VMs may use more than
7143 * 64 VPs, up to the maximum supported number of processors applicable
7144 * to the specific Windows version being used."
7145 */
7146 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
7147 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
7148 false),
7149 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
7150 true),
7151 DEFINE_PROP_END_OF_LIST()
7152 };
7153
7154 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
7155 {
7156 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7157 CPUClass *cc = CPU_CLASS(oc);
7158 DeviceClass *dc = DEVICE_CLASS(oc);
7159
7160 device_class_set_parent_realize(dc, x86_cpu_realizefn,
7161 &xcc->parent_realize);
7162 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
7163 &xcc->parent_unrealize);
7164 device_class_set_props(dc, x86_cpu_properties);
7165
7166 cpu_class_set_parent_reset(cc, x86_cpu_reset, &xcc->parent_reset);
7167 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
7168
7169 cc->class_by_name = x86_cpu_class_by_name;
7170 cc->parse_features = x86_cpu_parse_featurestr;
7171 cc->has_work = x86_cpu_has_work;
7172 #ifdef CONFIG_TCG
7173 cc->do_interrupt = x86_cpu_do_interrupt;
7174 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
7175 #endif
7176 cc->dump_state = x86_cpu_dump_state;
7177 cc->get_crash_info = x86_cpu_get_crash_info;
7178 cc->set_pc = x86_cpu_set_pc;
7179 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
7180 cc->gdb_read_register = x86_cpu_gdb_read_register;
7181 cc->gdb_write_register = x86_cpu_gdb_write_register;
7182 cc->get_arch_id = x86_cpu_get_arch_id;
7183 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
7184 #ifndef CONFIG_USER_ONLY
7185 cc->asidx_from_attrs = x86_asidx_from_attrs;
7186 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
7187 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
7188 cc->write_elf64_note = x86_cpu_write_elf64_note;
7189 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
7190 cc->write_elf32_note = x86_cpu_write_elf32_note;
7191 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
7192 cc->vmsd = &vmstate_x86_cpu;
7193 #endif
7194 cc->gdb_arch_name = x86_gdb_arch_name;
7195 #ifdef TARGET_X86_64
7196 cc->gdb_core_xml_file = "i386-64bit.xml";
7197 cc->gdb_num_core_regs = 66;
7198 #else
7199 cc->gdb_core_xml_file = "i386-32bit.xml";
7200 cc->gdb_num_core_regs = 50;
7201 #endif
7202 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
7203 cc->debug_excp_handler = breakpoint_handler;
7204 #endif
7205 cc->cpu_exec_enter = x86_cpu_exec_enter;
7206 cc->cpu_exec_exit = x86_cpu_exec_exit;
7207 #ifdef CONFIG_TCG
7208 cc->tcg_initialize = tcg_x86_init;
7209 cc->tlb_fill = x86_cpu_tlb_fill;
7210 #endif
7211 cc->disas_set_info = x86_disas_set_info;
7212
7213 dc->user_creatable = true;
7214 }
7215
7216 static const TypeInfo x86_cpu_type_info = {
7217 .name = TYPE_X86_CPU,
7218 .parent = TYPE_CPU,
7219 .instance_size = sizeof(X86CPU),
7220 .instance_init = x86_cpu_initfn,
7221 .abstract = true,
7222 .class_size = sizeof(X86CPUClass),
7223 .class_init = x86_cpu_common_class_init,
7224 };
7225
7226
7227 /* "base" CPU model, used by query-cpu-model-expansion */
7228 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
7229 {
7230 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7231
7232 xcc->static_model = true;
7233 xcc->migration_safe = true;
7234 xcc->model_description = "base CPU model type with no features enabled";
7235 xcc->ordering = 8;
7236 }
7237
7238 static const TypeInfo x86_base_cpu_type_info = {
7239 .name = X86_CPU_TYPE_NAME("base"),
7240 .parent = TYPE_X86_CPU,
7241 .class_init = x86_cpu_base_class_init,
7242 };
7243
7244 static void x86_cpu_register_types(void)
7245 {
7246 int i;
7247
7248 type_register_static(&x86_cpu_type_info);
7249 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
7250 x86_register_cpudef_types(&builtin_x86_defs[i]);
7251 }
7252 type_register_static(&max_x86_cpu_type_info);
7253 type_register_static(&x86_base_cpu_type_info);
7254 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
7255 type_register_static(&host_x86_cpu_type_info);
7256 #endif
7257 }
7258
7259 type_init(x86_cpu_register_types)