/* Get CPU type and Features for x86 processors.
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   Contributed by Sriraman Tallam (tmsriram@google.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

struct __processor_model
{
  unsigned int __cpu_vendor;
  unsigned int __cpu_type;
  unsigned int __cpu_subtype;
  /* The first 32 features are stored as bitmasks in __cpu_features.
     The rest of features are stored as bitmasks in a separate array
     of unsigned int.  */
  unsigned int __cpu_features[1];
};

struct __processor_model2
{
  unsigned int __cpu_family;
  unsigned int __cpu_model;
  unsigned int __cpu_max_level;
  unsigned int __cpu_ext_level;
};

#ifndef CHECK___builtin_cpu_is
# define CHECK___builtin_cpu_is(cpu)
#endif

#ifndef CHECK___builtin_cpu_supports
# define CHECK___builtin_cpu_supports(isa)
#endif

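/* The CHECK_* hooks above are no-ops by default.  A self-test harness that
   includes this header could map them onto the corresponding built-ins to
   cross-check the detection logic; a minimal sketch (hypothetical harness,
   not part of this file) might look like:

     #include <assert.h>
     #define CHECK___builtin_cpu_is(cpu) \
       assert (__builtin_cpu_is (cpu))
     #define CHECK___builtin_cpu_supports(isa) \
       assert (__builtin_cpu_supports (isa))
     #include "cpuinfo.h"
*/
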
/* Return non-zero if the processor has feature F.  */

static inline int
has_cpu_feature (struct __processor_model *cpu_model,
                 unsigned int *cpu_features2,
                 enum processor_features feature)
{
  unsigned index, offset;
  unsigned f = feature;

  if (f < 32)
    {
      /* The first 32 features.  */
      return cpu_model->__cpu_features[0] & (1U << f);
    }
  else
    {
      /* The rest of features.  cpu_features2[i] contains features from
         (32 + i * 32) to (31 + 32 + i * 32), inclusively.  */
      f -= 32;
      index = f / 32;
      offset = f % 32;
      return cpu_features2[index] & (1U << offset);
    }
}

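/* Worked example of the split storage (values illustrative, derived only
   from the arithmetic above): a feature whose enum value is 8 is tested as
   __cpu_features[0] & (1U << 8), while a feature whose enum value is 40
   maps to f - 32 = 8, hence index = 0 and offset = 8, and is tested as
   cpu_features2[0] & (1U << 8).  */
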
/* Save FEATURE to either CPU_MODEL or CPU_FEATURES2.  */

static inline void
set_cpu_feature (struct __processor_model *cpu_model,
                 unsigned int *cpu_features2,
                 enum processor_features feature)
{
  unsigned index, offset;
  unsigned f = feature;

  if (f < 32)
    {
      /* The first 32 features.  */
      cpu_model->__cpu_features[0] |= (1U << f);
    }
  else
    {
      /* The rest of features.  cpu_features2[i] contains features from
         (32 + i * 32) to (31 + 32 + i * 32), inclusively.  */
      f -= 32;
      index = f / 32;
      offset = f % 32;
      cpu_features2[index] |= (1U << offset);
    }
}

/* Drop FEATURE from either CPU_MODEL or CPU_FEATURES2.  */

static inline void
reset_cpu_feature (struct __processor_model *cpu_model,
                   unsigned int *cpu_features2,
                   enum processor_features feature)
{
  unsigned index, offset;
  unsigned f = feature;

  if (f < 32)
    {
      /* The first 32 features.  */
      cpu_model->__cpu_features[0] &= ~(1U << f);
    }
  else
    {
      /* The rest of features.  cpu_features2[i] contains features from
         (32 + i * 32) to (31 + 32 + i * 32), inclusively.  */
      f -= 32;
      index = f / 32;
      offset = f % 32;
      cpu_features2[index] &= ~(1U << offset);
    }
}

/* Get the specific type of AMD CPU and return AMD CPU name.  Return
   NULL for unknown AMD CPU.  */

static inline const char *
get_amd_cpu (struct __processor_model *cpu_model,
             struct __processor_model2 *cpu_model2,
             unsigned int *cpu_features2)
{
  const char *cpu = NULL;
  unsigned int family = cpu_model2->__cpu_family;
  unsigned int model = cpu_model2->__cpu_model;

  switch (family)
    {
    case 0x10:
      /* AMD Family 10h.  */
      cpu = "amdfam10";
      cpu_model->__cpu_type = AMDFAM10H;
      switch (model)
        {
        case 0x2:
          /* Barcelona.  */
          CHECK___builtin_cpu_is ("amdfam10h");
          CHECK___builtin_cpu_is ("barcelona");
          cpu_model->__cpu_subtype = AMDFAM10H_BARCELONA;
          break;
        case 0x4:
          /* Shanghai.  */
          CHECK___builtin_cpu_is ("amdfam10h");
          CHECK___builtin_cpu_is ("shanghai");
          cpu_model->__cpu_subtype = AMDFAM10H_SHANGHAI;
          break;
        case 0x8:
          /* Istanbul.  */
          CHECK___builtin_cpu_is ("amdfam10h");
          CHECK___builtin_cpu_is ("istanbul");
          cpu_model->__cpu_subtype = AMDFAM10H_ISTANBUL;
          break;
        default:
          break;
        }
      break;
    case 0x14:
      /* AMD Family 14h "btver1".  */
      cpu = "btver1";
      CHECK___builtin_cpu_is ("btver1");
      cpu_model->__cpu_type = AMD_BTVER1;
      break;
    case 0x15:
      /* AMD Family 15h "Bulldozer".  */
      cpu_model->__cpu_type = AMDFAM15H;
      if (model == 0x2)
        {
          /* Bulldozer version 2 "Piledriver".  */
          cpu = "bdver2";
          CHECK___builtin_cpu_is ("bdver2");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER2;
        }
      else if (model <= 0xf)
        {
          /* Bulldozer version 1.  */
          cpu = "bdver1";
          CHECK___builtin_cpu_is ("bdver1");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER1;
        }
      else if (model <= 0x2f)
        {
          /* Bulldozer version 2 "Piledriver".  */
          cpu = "bdver2";
          CHECK___builtin_cpu_is ("bdver2");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER2;
        }
      else if (model <= 0x4f)
        {
          /* Bulldozer version 3 "Steamroller".  */
          cpu = "bdver3";
          CHECK___builtin_cpu_is ("bdver3");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER3;
        }
      else if (model <= 0x7f)
        {
          /* Bulldozer version 4 "Excavator".  */
          cpu = "bdver4";
          CHECK___builtin_cpu_is ("bdver4");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER4;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_AVX2))
        {
          cpu = "bdver4";
          CHECK___builtin_cpu_is ("bdver4");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER4;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_XSAVEOPT))
        {
          cpu = "bdver3";
          CHECK___builtin_cpu_is ("bdver3");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER3;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_BMI))
        {
          cpu = "bdver2";
          CHECK___builtin_cpu_is ("bdver2");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER2;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_XOP))
        {
          cpu = "bdver1";
          CHECK___builtin_cpu_is ("bdver1");
          cpu_model->__cpu_subtype = AMDFAM15H_BDVER1;
        }
      break;
    case 0x16:
      /* AMD Family 16h "btver2".  */
      cpu = "btver2";
      CHECK___builtin_cpu_is ("btver2");
      cpu_model->__cpu_type = AMD_BTVER2;
      break;
    case 0x17:
      cpu_model->__cpu_type = AMDFAM17H;
      if (model <= 0x1f)
        {
          /* AMD family 17h version 1.  */
          cpu = "znver1";
          CHECK___builtin_cpu_is ("znver1");
          cpu_model->__cpu_subtype = AMDFAM17H_ZNVER1;
        }
      else if (model >= 0x30)
        {
          cpu = "znver2";
          CHECK___builtin_cpu_is ("znver2");
          cpu_model->__cpu_subtype = AMDFAM17H_ZNVER2;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_CLWB))
        {
          cpu = "znver2";
          CHECK___builtin_cpu_is ("znver2");
          cpu_model->__cpu_subtype = AMDFAM17H_ZNVER2;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_CLZERO))
        {
          cpu = "znver1";
          CHECK___builtin_cpu_is ("znver1");
          cpu_model->__cpu_subtype = AMDFAM17H_ZNVER1;
        }
      break;
    case 0x19:
      cpu_model->__cpu_type = AMDFAM19H;
      /* AMD family 19h.  */
      if (model <= 0x0f)
        {
          cpu = "znver3";
          CHECK___builtin_cpu_is ("znver3");
          cpu_model->__cpu_subtype = AMDFAM19H_ZNVER3;
        }
      else if ((model >= 0x10 && model <= 0x1f)
               || (model >= 0x60 && model <= 0xaf))
        {
          cpu = "znver4";
          CHECK___builtin_cpu_is ("znver4");
          cpu_model->__cpu_subtype = AMDFAM19H_ZNVER4;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_AVX512F))
        {
          cpu = "znver4";
          CHECK___builtin_cpu_is ("znver4");
          cpu_model->__cpu_subtype = AMDFAM19H_ZNVER4;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_VAES))
        {
          cpu = "znver3";
          CHECK___builtin_cpu_is ("znver3");
          cpu_model->__cpu_subtype = AMDFAM19H_ZNVER3;
        }
      break;
    default:
      break;
    }

  return cpu;
}

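/* Illustrative call (the values are hypothetical, chosen to exercise the
   0x17 case above); cpu_model2 is assumed to already hold the family and
   model decoded by cpu_indicator_init below:

     cpu_model2->__cpu_family = 0x17;
     cpu_model2->__cpu_model = 0x01;
     const char *name = get_amd_cpu (cpu_model, cpu_model2, cpu_features2);
     // name == "znver1", __cpu_type == AMDFAM17H,
     // __cpu_subtype == AMDFAM17H_ZNVER1

   Families not listed fall through the switch and NULL is returned.  */
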
/* Get the specific type of Intel CPU and return Intel CPU name.  Return
   NULL for unknown Intel CPU.  */

static inline const char *
get_intel_cpu (struct __processor_model *cpu_model,
               struct __processor_model2 *cpu_model2,
               unsigned int *cpu_features2)
{
  const char *cpu = NULL;

  /* Parse the model only for family 6.  */
  if (cpu_model2->__cpu_family != 0x6)
    return cpu;

  switch (cpu_model2->__cpu_model)
    {
    case 0x1c:
    case 0x26:
      /* Bonnell.  */
      cpu = "bonnell";
      CHECK___builtin_cpu_is ("atom");
      cpu_model->__cpu_type = INTEL_BONNELL;
      break;
    case 0x37:
    case 0x4a:
    case 0x4d:
    case 0x5d:
      /* Silvermont.  */
    case 0x4c:
    case 0x5a:
    case 0x75:
      /* Airmont.  */
      cpu = "silvermont";
      CHECK___builtin_cpu_is ("silvermont");
      cpu_model->__cpu_type = INTEL_SILVERMONT;
      break;
    case 0x5c:
    case 0x5f:
      /* Goldmont.  */
      cpu = "goldmont";
      CHECK___builtin_cpu_is ("goldmont");
      cpu_model->__cpu_type = INTEL_GOLDMONT;
      break;
    case 0x7a:
      /* Goldmont Plus.  */
      cpu = "goldmont-plus";
      CHECK___builtin_cpu_is ("goldmont-plus");
      cpu_model->__cpu_type = INTEL_GOLDMONT_PLUS;
      break;
    case 0x86:
    case 0x96:
    case 0x9c:
      /* Tremont.  */
      cpu = "tremont";
      CHECK___builtin_cpu_is ("tremont");
      cpu_model->__cpu_type = INTEL_TREMONT;
      break;
    case 0x57:
      /* Knights Landing.  */
      cpu = "knl";
      CHECK___builtin_cpu_is ("knl");
      cpu_model->__cpu_type = INTEL_KNL;
      break;
    case 0x85:
      /* Knights Mill.  */
      cpu = "knm";
      CHECK___builtin_cpu_is ("knm");
      cpu_model->__cpu_type = INTEL_KNM;
      break;
    case 0x1a:
    case 0x1e:
    case 0x1f:
    case 0x2e:
      /* Nehalem.  */
      cpu = "nehalem";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("nehalem");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_NEHALEM;
      break;
    case 0x25:
    case 0x2c:
    case 0x2f:
      /* Westmere.  */
      cpu = "westmere";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("westmere");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_WESTMERE;
      break;
    case 0x2a:
    case 0x2d:
      /* Sandy Bridge.  */
      cpu = "sandybridge";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("sandybridge");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_SANDYBRIDGE;
      break;
    case 0x3a:
    case 0x3e:
      /* Ivy Bridge.  */
      cpu = "ivybridge";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("ivybridge");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_IVYBRIDGE;
      break;
    case 0x3c:
    case 0x3f:
    case 0x45:
    case 0x46:
      /* Haswell.  */
      cpu = "haswell";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("haswell");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_HASWELL;
      break;
    case 0x3d:
    case 0x47:
    case 0x4f:
    case 0x56:
      /* Broadwell.  */
      cpu = "broadwell";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("broadwell");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_BROADWELL;
      break;
    case 0x4e:
    case 0x5e:
      /* Skylake.  */
    case 0x8e:
    case 0x9e:
      /* Kaby Lake.  */
    case 0xa5:
    case 0xa6:
      /* Comet Lake.  */
      cpu = "skylake";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("skylake");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_SKYLAKE;
      break;
    case 0xa7:
    case 0xa8:
      /* Rocket Lake.  */
      cpu = "rocketlake";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("rocketlake");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_ROCKETLAKE;
      break;
    case 0x55:
      CHECK___builtin_cpu_is ("corei7");
      cpu_model->__cpu_type = INTEL_COREI7;
      if (has_cpu_feature (cpu_model, cpu_features2,
                           FEATURE_AVX512BF16))
        {
          /* Cooper Lake.  */
          cpu = "cooperlake";
          CHECK___builtin_cpu_is ("cooperlake");
          cpu_model->__cpu_subtype = INTEL_COREI7_COOPERLAKE;
        }
      else if (has_cpu_feature (cpu_model, cpu_features2,
                                FEATURE_AVX512VNNI))
        {
          /* Cascade Lake.  */
          cpu = "cascadelake";
          CHECK___builtin_cpu_is ("cascadelake");
          cpu_model->__cpu_subtype = INTEL_COREI7_CASCADELAKE;
        }
      else
        {
          /* Skylake with AVX-512 support.  */
          cpu = "skylake-avx512";
          CHECK___builtin_cpu_is ("skylake-avx512");
          cpu_model->__cpu_subtype = INTEL_COREI7_SKYLAKE_AVX512;
        }
      break;
    case 0x66:
      /* Cannon Lake.  */
      cpu = "cannonlake";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("cannonlake");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_CANNONLAKE;
      break;
    case 0x6a:
    case 0x6c:
      /* Ice Lake server.  */
      cpu = "icelake-server";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("icelake-server");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_ICELAKE_SERVER;
      break;
    case 0x7e:
    case 0x7d:
    case 0x9d:
      /* Ice Lake client.  */
      cpu = "icelake-client";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("icelake-client");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_ICELAKE_CLIENT;
      break;
    case 0x8c:
    case 0x8d:
      /* Tiger Lake.  */
      cpu = "tigerlake";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("tigerlake");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_TIGERLAKE;
      break;
    case 0x97:
    case 0x9a:
    case 0xbf:
      /* Alder Lake.  */
    case 0xb7:
      /* Raptor Lake.  */
    case 0xb5:
    case 0xaa:
    case 0xac:
      /* Meteor Lake.  */
      cpu = "alderlake";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("alderlake");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_ALDERLAKE;
      break;
    case 0x8f:
      /* Sapphire Rapids.  */
      cpu = "sapphirerapids";
      CHECK___builtin_cpu_is ("corei7");
      CHECK___builtin_cpu_is ("sapphirerapids");
      cpu_model->__cpu_type = INTEL_COREI7;
      cpu_model->__cpu_subtype = INTEL_COREI7_SAPPHIRERAPIDS;
      break;
    case 0x17:
    case 0x1d:
      /* Penryn.  */
    case 0x0f:
      /* Merom.  */
      cpu = "core2";
      CHECK___builtin_cpu_is ("core2");
      cpu_model->__cpu_type = INTEL_CORE2;
      break;
    default:
      break;
    }

  return cpu;
}

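/* Illustrative: for family 6, model 0x55 the name cannot be decided from
   the model number alone, so the 0x55 case above falls back to feature
   bits already collected by get_available_features:

     const char *name = get_intel_cpu (cpu_model, cpu_model2, cpu_features2);
     // AVX512BF16 set       -> "cooperlake"
     // else AVX512VNNI set  -> "cascadelake"
     // otherwise            -> "skylake-avx512"
*/
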
/* Get the specific type of ZHAOXIN CPU and return ZHAOXIN CPU name.
   Return NULL for unknown ZHAOXIN CPU.  */

static inline const char *
get_zhaoxin_cpu (struct __processor_model *cpu_model,
                 struct __processor_model2 *cpu_model2,
                 unsigned int *cpu_features2)
{
  const char *cpu = NULL;
  unsigned int family = cpu_model2->__cpu_family;
  unsigned int model = cpu_model2->__cpu_model;

  switch (family)
    {
    /* ZHAOXIN family 7h.  */
    case 0x07:
      cpu_model->__cpu_type = ZHAOXIN_FAM7H;
      if (model == 0x3b)
        {
          cpu = "lujiazui";
          CHECK___builtin_cpu_is ("lujiazui");
          reset_cpu_feature (cpu_model, cpu_features2, FEATURE_AVX);
          reset_cpu_feature (cpu_model, cpu_features2, FEATURE_F16C);
          cpu_model->__cpu_subtype = ZHAOXIN_FAM7H_LUJIAZUI;
        }
      break;
    default:
      break;
    }

  return cpu;
}

/* ECX and EDX are output of CPUID at level one.  */
static inline void
get_available_features (struct __processor_model *cpu_model,
                        struct __processor_model2 *cpu_model2,
                        unsigned int *cpu_features2,
                        unsigned int ecx, unsigned int edx)
{
  unsigned int max_cpuid_level = cpu_model2->__cpu_max_level;
  unsigned int eax, ebx;
  unsigned int ext_level;

  /* Get XCR_XFEATURE_ENABLED_MASK register with xgetbv.  */
#define XCR_XFEATURE_ENABLED_MASK 0x0
#define XSTATE_FP 0x1
#define XSTATE_SSE 0x2
#define XSTATE_YMM 0x4
#define XSTATE_OPMASK 0x20
#define XSTATE_ZMM 0x40
#define XSTATE_HI_ZMM 0x80
#define XSTATE_TILECFG 0x20000
#define XSTATE_TILEDATA 0x40000

#define XCR_AVX_ENABLED_MASK \
  (XSTATE_SSE | XSTATE_YMM)
#define XCR_AVX512F_ENABLED_MASK \
  (XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM)
#define XCR_AMX_ENABLED_MASK \
  (XSTATE_TILECFG | XSTATE_TILEDATA)

  /* Check if AVX and AVX512 are usable.  */
  int avx_usable = 0;
  int avx512_usable = 0;
  int amx_usable = 0;
  /* Check if KL is usable.  */
  int has_kl = 0;
  if ((ecx & bit_OSXSAVE))
    {
      /* Check if XMM, YMM, OPMASK, upper 256 bits of ZMM0-ZMM15 and
         ZMM16-ZMM31 states are supported by OSXSAVE.  */
      unsigned int xcrlow;
      unsigned int xcrhigh;
      __asm__ (".byte 0x0f, 0x01, 0xd0"
               : "=a" (xcrlow), "=d" (xcrhigh)
               : "c" (XCR_XFEATURE_ENABLED_MASK));
      if ((xcrlow & XCR_AVX_ENABLED_MASK) == XCR_AVX_ENABLED_MASK)
        {
          avx_usable = 1;
          avx512_usable = ((xcrlow & XCR_AVX512F_ENABLED_MASK)
                           == XCR_AVX512F_ENABLED_MASK);
        }
      amx_usable = ((xcrlow & XCR_AMX_ENABLED_MASK)
                    == XCR_AMX_ENABLED_MASK);
    }

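  /* The .byte sequence above is the xgetbv instruction encoded by hand
     (presumably so that no special assembler or -mxsave support is
     needed); with XCR_XFEATURE_ENABLED_MASK (0) in %ecx it reads XCR0
     into %edx:%eax.  Numerically, the checks above require XCR0 bits 0x6
     (SSE | YMM) for AVX, 0xE6 for AVX-512, and 0x60000 for AMX.  A rough
     equivalent, assuming a compiler providing the XSAVE intrinsics,
     would be:

       #include <immintrin.h>
       unsigned long long xcr0 = _xgetbv (0);   // requires -mxsave
  */
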
#define set_feature(f) \
  set_cpu_feature (cpu_model, cpu_features2, f)

  if (edx & bit_CMOV)
    set_feature (FEATURE_CMOV);
  if (edx & bit_MMX)
    set_feature (FEATURE_MMX);
  if (edx & bit_SSE)
    set_feature (FEATURE_SSE);
  if (edx & bit_SSE2)
    set_feature (FEATURE_SSE2);
  if (edx & bit_CMPXCHG8B)
    set_feature (FEATURE_CMPXCHG8B);
  if (edx & bit_FXSAVE)
    set_feature (FEATURE_FXSAVE);

  if (ecx & bit_POPCNT)
    set_feature (FEATURE_POPCNT);
  if (ecx & bit_AES)
    set_feature (FEATURE_AES);
  if (ecx & bit_PCLMUL)
    set_feature (FEATURE_PCLMUL);
  if (ecx & bit_SSE3)
    set_feature (FEATURE_SSE3);
  if (ecx & bit_SSSE3)
    set_feature (FEATURE_SSSE3);
  if (ecx & bit_SSE4_1)
    set_feature (FEATURE_SSE4_1);
  if (ecx & bit_SSE4_2)
    set_feature (FEATURE_SSE4_2);
  if (ecx & bit_OSXSAVE)
    set_feature (FEATURE_OSXSAVE);
  if (ecx & bit_CMPXCHG16B)
    set_feature (FEATURE_CMPXCHG16B);
  if (ecx & bit_MOVBE)
    set_feature (FEATURE_MOVBE);
  if (ecx & bit_AES)
    set_feature (FEATURE_AES);
  if (ecx & bit_RDRND)
    set_feature (FEATURE_RDRND);
  if (ecx & bit_XSAVE)
    set_feature (FEATURE_XSAVE);
  if (avx_usable)
    {
      if (ecx & bit_AVX)
        set_feature (FEATURE_AVX);
      if (ecx & bit_FMA)
        set_feature (FEATURE_FMA);
      if (ecx & bit_F16C)
        set_feature (FEATURE_F16C);
    }

  /* Get Advanced Features at level 7 (eax = 7, ecx = 0/1).  */
  if (max_cpuid_level >= 7)
    {
      __cpuid_count (7, 0, eax, ebx, ecx, edx);
      if (ebx & bit_BMI)
        set_feature (FEATURE_BMI);
      if (ebx & bit_SGX)
        set_feature (FEATURE_SGX);
      if (ebx & bit_HLE)
        set_feature (FEATURE_HLE);
      if (ebx & bit_RTM)
        set_feature (FEATURE_RTM);
      if (avx_usable)
        {
          if (ebx & bit_AVX2)
            set_feature (FEATURE_AVX2);
          if (ecx & bit_VPCLMULQDQ)
            set_feature (FEATURE_VPCLMULQDQ);
          if (ecx & bit_VAES)
            set_feature (FEATURE_VAES);
        }
      if (ebx & bit_BMI2)
        set_feature (FEATURE_BMI2);
      if (ebx & bit_FSGSBASE)
        set_feature (FEATURE_FSGSBASE);
      if (ebx & bit_RDSEED)
        set_feature (FEATURE_RDSEED);
      if (ebx & bit_ADX)
        set_feature (FEATURE_ADX);
      if (ebx & bit_SHA)
        set_feature (FEATURE_SHA);
      if (ebx & bit_CLFLUSHOPT)
        set_feature (FEATURE_CLFLUSHOPT);
      if (ebx & bit_CLWB)
        set_feature (FEATURE_CLWB);
      if (ecx & bit_PREFETCHWT1)
        set_feature (FEATURE_PREFETCHWT1);
      /* NB: bit_OSPKE indicates that OS supports PKU.  */
      if (ecx & bit_OSPKE)
        set_feature (FEATURE_PKU);
      if (ecx & bit_RDPID)
        set_feature (FEATURE_RDPID);
      if (ecx & bit_GFNI)
        set_feature (FEATURE_GFNI);
      if (ecx & bit_MOVDIRI)
        set_feature (FEATURE_MOVDIRI);
      if (ecx & bit_MOVDIR64B)
        set_feature (FEATURE_MOVDIR64B);
      if (ecx & bit_ENQCMD)
        set_feature (FEATURE_ENQCMD);
      if (ecx & bit_CLDEMOTE)
        set_feature (FEATURE_CLDEMOTE);
      if (ecx & bit_WAITPKG)
        set_feature (FEATURE_WAITPKG);
      if (ecx & bit_SHSTK)
        set_feature (FEATURE_SHSTK);
      if (ecx & bit_KL)
        has_kl = 1;
      if (edx & bit_SERIALIZE)
        set_feature (FEATURE_SERIALIZE);
      if (edx & bit_TSXLDTRK)
        set_feature (FEATURE_TSXLDTRK);
      if (edx & bit_PCONFIG)
        set_feature (FEATURE_PCONFIG);
      if (edx & bit_IBT)
        set_feature (FEATURE_IBT);
      if (edx & bit_UINTR)
        set_feature (FEATURE_UINTR);
      if (amx_usable)
        {
          if (edx & bit_AMX_TILE)
            set_feature (FEATURE_AMX_TILE);
          if (edx & bit_AMX_INT8)
            set_feature (FEATURE_AMX_INT8);
          if (edx & bit_AMX_BF16)
            set_feature (FEATURE_AMX_BF16);
        }
      if (avx512_usable)
        {
          if (ebx & bit_AVX512F)
            set_feature (FEATURE_AVX512F);
          if (ebx & bit_AVX512VL)
            set_feature (FEATURE_AVX512VL);
          if (ebx & bit_AVX512BW)
            set_feature (FEATURE_AVX512BW);
          if (ebx & bit_AVX512DQ)
            set_feature (FEATURE_AVX512DQ);
          if (ebx & bit_AVX512CD)
            set_feature (FEATURE_AVX512CD);
          if (ebx & bit_AVX512PF)
            set_feature (FEATURE_AVX512PF);
          if (ebx & bit_AVX512ER)
            set_feature (FEATURE_AVX512ER);
          if (ebx & bit_AVX512IFMA)
            set_feature (FEATURE_AVX512IFMA);
          if (ecx & bit_AVX512VBMI)
            set_feature (FEATURE_AVX512VBMI);
          if (ecx & bit_AVX512VBMI2)
            set_feature (FEATURE_AVX512VBMI2);
          if (ecx & bit_AVX512VNNI)
            set_feature (FEATURE_AVX512VNNI);
          if (ecx & bit_AVX512BITALG)
            set_feature (FEATURE_AVX512BITALG);
          if (ecx & bit_AVX512VPOPCNTDQ)
            set_feature (FEATURE_AVX512VPOPCNTDQ);
          if (edx & bit_AVX5124VNNIW)
            set_feature (FEATURE_AVX5124VNNIW);
          if (edx & bit_AVX5124FMAPS)
            set_feature (FEATURE_AVX5124FMAPS);
          if (edx & bit_AVX512VP2INTERSECT)
            set_feature (FEATURE_AVX512VP2INTERSECT);
          if (edx & bit_AVX512FP16)
            set_feature (FEATURE_AVX512FP16);
        }

      __cpuid_count (7, 1, eax, ebx, ecx, edx);
      if (eax & bit_HRESET)
        set_feature (FEATURE_HRESET);
      if (avx_usable)
        {
          if (eax & bit_AVXVNNI)
            set_feature (FEATURE_AVXVNNI);
          if (eax & bit_AVXIFMA)
            set_feature (FEATURE_AVXIFMA);
          if (edx & bit_AVXVNNIINT8)
            set_feature (FEATURE_AVXVNNIINT8);
          if (edx & bit_AVXNECONVERT)
            set_feature (FEATURE_AVXNECONVERT);
        }
      if (avx512_usable)
        {
          if (eax & bit_AVX512BF16)
            set_feature (FEATURE_AVX512BF16);
        }
    }

  /* Get Advanced Features at level 0xd (eax = 0xd, ecx = 1).  */
  if (max_cpuid_level >= 0xd)
    {
      __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
      if (eax & bit_XSAVEOPT)
        set_feature (FEATURE_XSAVEOPT);
      if (eax & bit_XSAVEC)
        set_feature (FEATURE_XSAVEC);
      if (eax & bit_XSAVES)
        set_feature (FEATURE_XSAVES);
    }

  /* Get Advanced Features at level 0x14 (eax = 0x14, ecx = 0).  */
  if (max_cpuid_level >= 0x14)
    {
      __cpuid_count (0x14, 0, eax, ebx, ecx, edx);
      if (ebx & bit_PTWRITE)
        set_feature (FEATURE_PTWRITE);
    }

  /* Get Advanced Features at level 0x19 (eax = 0x19).  */
  if (max_cpuid_level >= 0x19)
    {
      __cpuid (0x19, eax, ebx, ecx, edx);
      /* Check if OS supports keylocker.  */
      if (ebx & bit_AESKLE)
        {
          set_feature (FEATURE_AESKLE);
          if (ebx & bit_WIDEKL)
            set_feature (FEATURE_WIDEKL);
          if (has_kl)
            set_feature (FEATURE_KL);
        }
    }

  /* Check cpuid level of extended features.  */
  __cpuid (0x80000000, ext_level, ebx, ecx, edx);

  cpu_model2->__cpu_ext_level = ext_level;

  if (ext_level >= 0x80000001)
    {
      __cpuid (0x80000001, eax, ebx, ecx, edx);

      if (ecx & bit_SSE4a)
        set_feature (FEATURE_SSE4_A);
      if (ecx & bit_LAHF_LM)
        set_feature (FEATURE_LAHF_LM);
      if (ecx & bit_ABM)
        set_feature (FEATURE_ABM);
      if (ecx & bit_LWP)
        set_feature (FEATURE_LWP);
      if (ecx & bit_TBM)
        set_feature (FEATURE_TBM);
      if (ecx & bit_LZCNT)
        set_feature (FEATURE_LZCNT);
      if (ecx & bit_PRFCHW)
        set_feature (FEATURE_PRFCHW);
      if (ecx & bit_MWAITX)
        set_feature (FEATURE_MWAITX);

      if (edx & bit_LM)
        set_feature (FEATURE_LM);
      if (edx & bit_3DNOWP)
        set_feature (FEATURE_3DNOWP);
      if (edx & bit_3DNOW)
        set_feature (FEATURE_3DNOW);

      if (avx_usable)
        {
          if (ecx & bit_FMA4)
            set_feature (FEATURE_FMA4);
          if (ecx & bit_XOP)
            set_feature (FEATURE_XOP);
        }
    }

  if (ext_level >= 0x80000008)
    {
      __cpuid (0x80000008, eax, ebx, ecx, edx);
      if (ebx & bit_CLZERO)
        set_feature (FEATURE_CLZERO);
      if (ebx & bit_WBNOINVD)
        set_feature (FEATURE_WBNOINVD);
    }

#undef set_feature
}

static inline int
cpu_indicator_init (struct __processor_model *cpu_model,
                    struct __processor_model2 *cpu_model2,
                    unsigned int *cpu_features2)
{
  unsigned int eax, ebx, ecx, edx;

  int max_level;
  unsigned int vendor;
  unsigned int model, family;
  unsigned int extended_model, extended_family;

  /* This function needs to run just once.  */
  if (cpu_model->__cpu_vendor)
    return 0;

  /* Assume cpuid insn present.  Run in level 0 to get vendor id.  */
  if (!__get_cpuid (0, &eax, &ebx, &ecx, &edx))
    {
      cpu_model->__cpu_vendor = VENDOR_OTHER;
      return -1;
    }

  vendor = ebx;
  max_level = eax;

  if (max_level < 1)
    {
      cpu_model->__cpu_vendor = VENDOR_OTHER;
      return -1;
    }

  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    {
      cpu_model->__cpu_vendor = VENDOR_OTHER;
      return -1;
    }

  cpu_model2->__cpu_max_level = max_level;

  model = (eax >> 4) & 0x0f;
  family = (eax >> 8) & 0x0f;
  extended_model = (eax >> 12) & 0xf0;
  extended_family = (eax >> 20) & 0xff;

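  /* Worked example (a hypothetical Skylake desktop part reporting
     CPUID.1 EAX = 0x000506E3): model = 0xe, family = 0x6,
     extended_model = 0x50, extended_family = 0x00.  Family 0x6 takes the
     "model += extended_model" adjustment below, giving model 0x5e, which
     get_intel_cpu maps to "skylake".  */
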
  if (vendor == signature_INTEL_ebx)
    {
      /* Adjust model and family for Intel CPUs.  */
      if (family == 0x0f)
        {
          family += extended_family;
          model += extended_model;
        }
      else if (family == 0x06)
        model += extended_model;

      cpu_model2->__cpu_family = family;
      cpu_model2->__cpu_model = model;

      /* Find available features.  */
      get_available_features (cpu_model, cpu_model2, cpu_features2,
                              ecx, edx);
      /* Get CPU type.  */
      get_intel_cpu (cpu_model, cpu_model2, cpu_features2);
      cpu_model->__cpu_vendor = VENDOR_INTEL;
    }
  else if (vendor == signature_AMD_ebx)
    {
      /* Adjust model and family for AMD CPUs.  */
      if (family == 0x0f)
        {
          family += extended_family;
          model += extended_model;
        }

      cpu_model2->__cpu_family = family;
      cpu_model2->__cpu_model = model;

      /* Find available features.  */
      get_available_features (cpu_model, cpu_model2, cpu_features2,
                              ecx, edx);
      /* Get CPU type.  */
      get_amd_cpu (cpu_model, cpu_model2, cpu_features2);
      cpu_model->__cpu_vendor = VENDOR_AMD;
    }
  else if (vendor == signature_CENTAUR_ebx && family < 0x07)
    cpu_model->__cpu_vendor = VENDOR_CENTAUR;
  else if (vendor == signature_SHANGHAI_ebx
           || vendor == signature_CENTAUR_ebx)
    {
      /* Adjust model and family for ZHAOXIN CPUs.  */
      if (family == 0x07)
        {
          model += extended_model;
        }

      cpu_model2->__cpu_family = family;
      cpu_model2->__cpu_model = model;

      /* Find available features.  */
      get_available_features (cpu_model, cpu_model2, cpu_features2,
                              ecx, edx);
      /* Get CPU type.  */
      get_zhaoxin_cpu (cpu_model, cpu_model2, cpu_features2);
      cpu_model->__cpu_vendor = VENDOR_ZHAOXIN;
    }
  else if (vendor == signature_CYRIX_ebx)
    cpu_model->__cpu_vendor = VENDOR_CYRIX;
  else if (vendor == signature_NSC_ebx)
    cpu_model->__cpu_vendor = VENDOR_NSC;
  else
    cpu_model->__cpu_vendor = VENDOR_OTHER;

  if (has_cpu_feature (cpu_model, cpu_features2, FEATURE_LM)
      && has_cpu_feature (cpu_model, cpu_features2, FEATURE_SSE2))
    {
      CHECK___builtin_cpu_supports ("x86-64");
      set_cpu_feature (cpu_model, cpu_features2,
                       FEATURE_X86_64_BASELINE);
      if (has_cpu_feature (cpu_model, cpu_features2, FEATURE_CMPXCHG16B)
          && has_cpu_feature (cpu_model, cpu_features2, FEATURE_POPCNT)
          && has_cpu_feature (cpu_model, cpu_features2, FEATURE_LAHF_LM)
          && has_cpu_feature (cpu_model, cpu_features2, FEATURE_SSE4_2))
        {
          CHECK___builtin_cpu_supports ("x86-64-v2");
          set_cpu_feature (cpu_model, cpu_features2,
                           FEATURE_X86_64_V2);
          if (has_cpu_feature (cpu_model, cpu_features2, FEATURE_AVX2)
              && has_cpu_feature (cpu_model, cpu_features2, FEATURE_BMI)
              && has_cpu_feature (cpu_model, cpu_features2, FEATURE_BMI2)
              && has_cpu_feature (cpu_model, cpu_features2, FEATURE_F16C)
              && has_cpu_feature (cpu_model, cpu_features2, FEATURE_FMA)
              && has_cpu_feature (cpu_model, cpu_features2,
                                  FEATURE_LZCNT)
              && has_cpu_feature (cpu_model, cpu_features2,
                                  FEATURE_MOVBE))
            {
              CHECK___builtin_cpu_supports ("x86-64-v3");
              set_cpu_feature (cpu_model, cpu_features2,
                               FEATURE_X86_64_V3);
              if (has_cpu_feature (cpu_model, cpu_features2,
                                   FEATURE_AVX512BW)
                  && has_cpu_feature (cpu_model, cpu_features2,
                                      FEATURE_AVX512CD)
                  && has_cpu_feature (cpu_model, cpu_features2,
                                      FEATURE_AVX512DQ)
                  && has_cpu_feature (cpu_model, cpu_features2,
                                      FEATURE_AVX512VL))
                {
                  CHECK___builtin_cpu_supports ("x86-64-v4");
                  set_cpu_feature (cpu_model, cpu_features2,
                                   FEATURE_X86_64_V4);
                }
            }
        }
    }

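  /* The x86-64 micro-architecture levels recorded above back the
     corresponding built-in queries.  Illustrative application-side use
     (a sketch, not part of this file; run_avx2_path and run_generic_path
     are hypothetical names):

       if (__builtin_cpu_supports ("x86-64-v3"))
         run_avx2_path ();
       else
         run_generic_path ();
  */
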
  gcc_assert (cpu_model->__cpu_vendor < VENDOR_MAX);
  gcc_assert (cpu_model->__cpu_type < CPU_TYPE_MAX);
  gcc_assert (cpu_model->__cpu_subtype < CPU_SUBTYPE_MAX);

  return 0;
}