/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2020 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <cpu-features.h>
#include <libc-pointer-arith.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE cpu
# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
# include <elf/dl-tunables.h>

extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
  attribute_hidden;

# if CET_ENABLED
extern void TUNABLE_CALLBACK (set_x86_ibt) (tunable_val_t *)
  attribute_hidden;
extern void TUNABLE_CALLBACK (set_x86_shstk) (tunable_val_t *)
  attribute_hidden;
# endif
#endif
static void
update_usable (struct cpu_features *cpu_features)
{
  /* Copy the cpuid bits to usable bits for CPU features whose usability
     in user space can be detected without additional OS support.  */
  CPU_FEATURE_SET_USABLE (cpu_features, SSE3);
  CPU_FEATURE_SET_USABLE (cpu_features, PCLMULQDQ);
  CPU_FEATURE_SET_USABLE (cpu_features, SSSE3);
  CPU_FEATURE_SET_USABLE (cpu_features, CMPXCHG16B);
  CPU_FEATURE_SET_USABLE (cpu_features, SSE4_1);
  CPU_FEATURE_SET_USABLE (cpu_features, SSE4_2);
  CPU_FEATURE_SET_USABLE (cpu_features, MOVBE);
  CPU_FEATURE_SET_USABLE (cpu_features, POPCNT);
  CPU_FEATURE_SET_USABLE (cpu_features, AES);
  CPU_FEATURE_SET_USABLE (cpu_features, OSXSAVE);
  CPU_FEATURE_SET_USABLE (cpu_features, TSC);
  CPU_FEATURE_SET_USABLE (cpu_features, CX8);
  CPU_FEATURE_SET_USABLE (cpu_features, CMOV);
  CPU_FEATURE_SET_USABLE (cpu_features, CLFSH);
  CPU_FEATURE_SET_USABLE (cpu_features, MMX);
  CPU_FEATURE_SET_USABLE (cpu_features, FXSR);
  CPU_FEATURE_SET_USABLE (cpu_features, SSE);
  CPU_FEATURE_SET_USABLE (cpu_features, SSE2);
  CPU_FEATURE_SET_USABLE (cpu_features, HTT);
  CPU_FEATURE_SET_USABLE (cpu_features, BMI1);
  CPU_FEATURE_SET_USABLE (cpu_features, HLE);
  CPU_FEATURE_SET_USABLE (cpu_features, BMI2);
  CPU_FEATURE_SET_USABLE (cpu_features, ERMS);
  CPU_FEATURE_SET_USABLE (cpu_features, RTM);
  CPU_FEATURE_SET_USABLE (cpu_features, RDSEED);
  CPU_FEATURE_SET_USABLE (cpu_features, ADX);
  CPU_FEATURE_SET_USABLE (cpu_features, CLFLUSHOPT);
  CPU_FEATURE_SET_USABLE (cpu_features, CLWB);
  CPU_FEATURE_SET_USABLE (cpu_features, SHA);
  CPU_FEATURE_SET_USABLE (cpu_features, PREFETCHWT1);
  CPU_FEATURE_SET_USABLE (cpu_features, OSPKE);
  CPU_FEATURE_SET_USABLE (cpu_features, WAITPKG);
  CPU_FEATURE_SET_USABLE (cpu_features, GFNI);
  CPU_FEATURE_SET_USABLE (cpu_features, RDPID);
  CPU_FEATURE_SET_USABLE (cpu_features, CLDEMOTE);
  CPU_FEATURE_SET_USABLE (cpu_features, MOVDIRI);
  CPU_FEATURE_SET_USABLE (cpu_features, MOVDIR64B);
  CPU_FEATURE_SET_USABLE (cpu_features, FSRM);
  CPU_FEATURE_SET_USABLE (cpu_features, SERIALIZE);
  CPU_FEATURE_SET_USABLE (cpu_features, TSXLDTRK);
  CPU_FEATURE_SET_USABLE (cpu_features, LAHF64_SAHF64);
  CPU_FEATURE_SET_USABLE (cpu_features, LZCNT);
  CPU_FEATURE_SET_USABLE (cpu_features, SSE4A);
  CPU_FEATURE_SET_USABLE (cpu_features, PREFETCHW);
  CPU_FEATURE_SET_USABLE (cpu_features, TBM);
  CPU_FEATURE_SET_USABLE (cpu_features, RDTSCP);
  CPU_FEATURE_SET_USABLE (cpu_features, WBNOINVD);
  CPU_FEATURE_SET_USABLE (cpu_features, FZLRM);
  CPU_FEATURE_SET_USABLE (cpu_features, FSRS);
  CPU_FEATURE_SET_USABLE (cpu_features, FSRCS);
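
  /* The remaining features depend on register state (YMM, ZMM or AMX
     tile registers) that the OS must enable through XCR0.  XCR0 can be
     read with the xgetbv instruction, which is only available when the
     OS has set the OSXSAVE bit, so everything below is gated on it.  */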

  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
      /* Are YMM and XMM states usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state))
          == (bit_YMM_state | bit_XMM_state))
        {
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            {
              CPU_FEATURE_SET (cpu_features, AVX);
              /* The following features depend on AVX being usable.  */
              /* Determine if AVX2 is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
                {
                  CPU_FEATURE_SET (cpu_features, AVX2);

                  /* Unaligned loads with 256-bit AVX registers are faster
                     on Intel/AMD processors with AVX2.  */
                  cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
                    |= bit_arch_AVX_Fast_Unaligned_Load;
                }
              /* Determine if AVX-VNNI is usable.  */
              CPU_FEATURE_SET_USABLE (cpu_features, AVX_VNNI);
              /* Determine if FMA is usable.  */
              CPU_FEATURE_SET_USABLE (cpu_features, FMA);
              /* Determine if VAES is usable.  */
              CPU_FEATURE_SET_USABLE (cpu_features, VAES);
              /* Determine if VPCLMULQDQ is usable.  */
              CPU_FEATURE_SET_USABLE (cpu_features, VPCLMULQDQ);
              /* Determine if XOP is usable.  */
              CPU_FEATURE_SET_USABLE (cpu_features, XOP);
              /* Determine if F16C is usable.  */
              CPU_FEATURE_SET_USABLE (cpu_features, F16C);
            }

          /* Check if OPMASK state, upper 256-bit of ZMM0-ZMM15 and
             ZMM16-ZMM31 state are enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state))
              == (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  CPU_FEATURE_SET (cpu_features, AVX512F);
                  /* Determine if AVX512CD is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512CD);
                  /* Determine if AVX512ER is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512ER);
                  /* Determine if AVX512PF is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512PF);
                  /* Determine if AVX512VL is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512VL);
                  /* Determine if AVX512DQ is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512DQ);
                  /* Determine if AVX512BW is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512BW);
                  /* Determine if AVX512_4FMAPS is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_4FMAPS);
                  /* Determine if AVX512_4VNNIW is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_4VNNIW);
                  /* Determine if AVX512_BITALG is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_BITALG);
                  /* Determine if AVX512_IFMA is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_IFMA);
                  /* Determine if AVX512_VBMI is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_VBMI);
                  /* Determine if AVX512_VBMI2 is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_VBMI2);
                  /* Determine if AVX512_VNNI is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_VNNI);
                  /* Determine if AVX512_VPOPCNTDQ is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_VPOPCNTDQ);
                  /* Determine if AVX512_VP2INTERSECT is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features,
                                          AVX512_VP2INTERSECT);
                  /* Determine if AVX512_BF16 is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_BF16);
                  /* Determine if AVX512_FP16 is usable.  */
                  CPU_FEATURE_SET_USABLE (cpu_features, AVX512_FP16);
                }
            }
        }

      /* Are XTILECFG and XTILEDATA states usable?  */
      if ((xcrlow & (bit_XTILECFG_state | bit_XTILEDATA_state))
          == (bit_XTILECFG_state | bit_XTILEDATA_state))
        {
          /* Determine if AMX_BF16 is usable.  */
          CPU_FEATURE_SET_USABLE (cpu_features, AMX_BF16);
          /* Determine if AMX_TILE is usable.  */
          CPU_FEATURE_SET_USABLE (cpu_features, AMX_TILE);
          /* Determine if AMX_INT8 is usable.  */
          CPU_FEATURE_SET_USABLE (cpu_features, AMX_INT8);
        }

      /* These features are usable only when OSXSAVE is enabled.  */
      CPU_FEATURE_SET (cpu_features, XSAVE);
      CPU_FEATURE_SET_USABLE (cpu_features, XSAVEOPT);
      CPU_FEATURE_SET_USABLE (cpu_features, XSAVEC);
      CPU_FEATURE_SET_USABLE (cpu_features, XGETBV_ECX_1);
      CPU_FEATURE_SET_USABLE (cpu_features, XFD);

      /* For _dl_runtime_resolve, set xsave_state_size to xsave area
         size + integer register save size and align it to 64 bytes.  */
      if (cpu_features->basic.max_cpuid >= 0xd)
        {
          unsigned int eax, ebx, ecx, edx;

          __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
          if (ebx != 0)
            {
              unsigned int xsave_state_full_size
                = ALIGN_UP (ebx + STATE_SAVE_OFFSET, 64);

              cpu_features->xsave_state_size
                = xsave_state_full_size;
              cpu_features->xsave_state_full_size
                = xsave_state_full_size;
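
              /* CPUID leaf 0xd, sub-leaf 0, reports in EBX the size of
                 the XSAVE area in the standard format for the state
                 components currently enabled in XCR0.  The compacted
                 format used by XSAVEC can be smaller, so the size is
                 recomputed below from the per-component sizes and
                 offsets reported by sub-leaves i >= 2.  */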

              /* Check if XSAVEC is available.  */
              if (CPU_FEATURES_CPU_P (cpu_features, XSAVEC))
                {
                  unsigned int xstate_comp_offsets[32];
                  unsigned int xstate_comp_sizes[32];
                  unsigned int i;

                  xstate_comp_offsets[0] = 0;
                  xstate_comp_offsets[1] = 160;
                  xstate_comp_offsets[2] = 576;
                  xstate_comp_sizes[0] = 160;
                  xstate_comp_sizes[1] = 256;
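
                  /* Components 0 (x87) and 1 (SSE) live in the 512-byte
                     legacy FXSAVE area; the XMM registers start at byte
                     160.  The 64-byte XSAVE header follows the legacy
                     area, so the first extended component starts at
                     offset 576.  */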

                  for (i = 2; i < 32; i++)
                    {
                      if ((STATE_SAVE_MASK & (1 << i)) != 0)
                        {
                          __cpuid_count (0xd, i, eax, ebx, ecx, edx);
                          xstate_comp_sizes[i] = eax;
                        }
                      else
                        {
                          ecx = 0;
                          xstate_comp_sizes[i] = 0;
                        }

                      if (i > 2)
                        {
                          xstate_comp_offsets[i]
                            = (xstate_comp_offsets[i - 1]
                               + xstate_comp_sizes[i - 1]);
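                          /* Sub-leaf i sets bit 1 of ECX if this
                             component must be 64-byte aligned in the
                             compacted format.  */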
                          if ((ecx & (1 << 1)) != 0)
                            xstate_comp_offsets[i]
                              = ALIGN_UP (xstate_comp_offsets[i], 64);
                        }
                    }

                  /* Use XSAVEC.  */
                  unsigned int size
                    = xstate_comp_offsets[31] + xstate_comp_sizes[31];
                  if (size)
                    {
                      cpu_features->xsave_state_size
                        = ALIGN_UP (size + STATE_SAVE_OFFSET, 64);
                      CPU_FEATURE_SET (cpu_features, XSAVEC);
                    }
                }
            }
        }
    }

  /* Determine if PKU is usable.  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSPKE))
    CPU_FEATURE_SET (cpu_features, PKU);

  /* Determine if Key Locker instructions are usable.  */
  if (CPU_FEATURES_CPU_P (cpu_features, AESKLE))
    {
      CPU_FEATURE_SET (cpu_features, AESKLE);
      CPU_FEATURE_SET_USABLE (cpu_features, KL);
      CPU_FEATURE_SET_USABLE (cpu_features, WIDE_KL);
    }
}
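
/* Query the extended CPUID leaves.  CPUID leaf 0x80000000 returns the
   highest supported extended leaf in EAX; each leaf below is read only
   if the CPU reports it as available.  */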
static void
get_extended_indices (struct cpu_features *cpu_features)
{
  unsigned int eax, ebx, ecx, edx;
  __cpuid (0x80000000, eax, ebx, ecx, edx);
  if (eax >= 0x80000001)
    __cpuid (0x80000001,
             cpu_features->features[COMMON_CPUID_INDEX_80000001].cpuid.eax,
             cpu_features->features[COMMON_CPUID_INDEX_80000001].cpuid.ebx,
             cpu_features->features[COMMON_CPUID_INDEX_80000001].cpuid.ecx,
             cpu_features->features[COMMON_CPUID_INDEX_80000001].cpuid.edx);
  if (eax >= 0x80000007)
    __cpuid (0x80000007,
             cpu_features->features[COMMON_CPUID_INDEX_80000007].cpuid.eax,
             cpu_features->features[COMMON_CPUID_INDEX_80000007].cpuid.ebx,
             cpu_features->features[COMMON_CPUID_INDEX_80000007].cpuid.ecx,
             cpu_features->features[COMMON_CPUID_INDEX_80000007].cpuid.edx);
  if (eax >= 0x80000008)
    __cpuid (0x80000008,
             cpu_features->features[COMMON_CPUID_INDEX_80000008].cpuid.eax,
             cpu_features->features[COMMON_CPUID_INDEX_80000008].cpuid.ebx,
             cpu_features->features[COMMON_CPUID_INDEX_80000008].cpuid.ecx,
             cpu_features->features[COMMON_CPUID_INDEX_80000008].cpuid.edx);
}

static void
get_common_indices (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model, unsigned int *stepping)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax,
               cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ebx,
               cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ecx,
               cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.edx);
      cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.eax = eax;
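      /* Leaf 1 EAX layout: stepping in bits 3:0, model in bits 7:4,
         family in bits 11:8, extended model in bits 19:16 and extended
         family in bits 27:20.  The extended model is extracted with
         ">> 12" and mask 0xf0 so it is already shifted into the high
         nibble and can simply be added to the 4-bit model.  */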
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      *stepping = eax & 0x0f;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
    }

  if (cpu_features->basic.max_cpuid >= 7)
    {
      __cpuid_count (7, 0,
                     cpu_features->features[COMMON_CPUID_INDEX_7].cpuid.eax,
                     cpu_features->features[COMMON_CPUID_INDEX_7].cpuid.ebx,
                     cpu_features->features[COMMON_CPUID_INDEX_7].cpuid.ecx,
                     cpu_features->features[COMMON_CPUID_INDEX_7].cpuid.edx);
      __cpuid_count (7, 1,
                     cpu_features->features[COMMON_CPUID_INDEX_7_ECX_1].cpuid.eax,
                     cpu_features->features[COMMON_CPUID_INDEX_7_ECX_1].cpuid.ebx,
                     cpu_features->features[COMMON_CPUID_INDEX_7_ECX_1].cpuid.ecx,
                     cpu_features->features[COMMON_CPUID_INDEX_7_ECX_1].cpuid.edx);
    }

  if (cpu_features->basic.max_cpuid >= 0xd)
    __cpuid_count (0xd, 1,
                   cpu_features->features[COMMON_CPUID_INDEX_D_ECX_1].cpuid.eax,
                   cpu_features->features[COMMON_CPUID_INDEX_D_ECX_1].cpuid.ebx,
                   cpu_features->features[COMMON_CPUID_INDEX_D_ECX_1].cpuid.ecx,
                   cpu_features->features[COMMON_CPUID_INDEX_D_ECX_1].cpuid.edx);

  if (cpu_features->basic.max_cpuid >= 0x19)
    __cpuid_count (0x19, 0,
                   cpu_features->features[COMMON_CPUID_INDEX_19].cpuid.eax,
                   cpu_features->features[COMMON_CPUID_INDEX_19].cpuid.ebx,
                   cpu_features->features[COMMON_CPUID_INDEX_19].cpuid.ecx,
                   cpu_features->features[COMMON_CPUID_INDEX_19].cpuid.edx);
}
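
/* Several code paths below OR a combined mask of bit_arch_* flags into
   a single element of the preferred array, which is only valid if all
   of the corresponding index_arch_* values are equal.  */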
_Static_assert (((index_arch_Fast_Unaligned_Load
                  == index_arch_Fast_Unaligned_Copy)
                 && (index_arch_Fast_Unaligned_Load
                     == index_arch_Prefer_PMINUB_for_stringop)
                 && (index_arch_Fast_Unaligned_Load
                     == index_arch_Slow_SSE4_2)
                 && (index_arch_Fast_Unaligned_Load
                     == index_arch_Fast_Rep_String)
                 && (index_arch_Fast_Unaligned_Load
                     == index_arch_Fast_Copy_Backward)),
                "Incorrect index_arch_Fast_Unaligned_Load");

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  unsigned int stepping = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif

  __cpuid (0, cpu_features->basic.max_cpuid, ebx, ecx, edx);
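
  /* Leaf 0 also returns the 12-byte vendor string in EBX, EDX and ECX
     (in that order).  The checks below compare it as three little-endian
     32-bit words; e.g. "Genu" is EBX == 0x756e6547.  */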
  /* This spells out "GenuineIntel".  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model;

      kind = arch_kind_intel;

      get_common_indices (cpu_features, &family, &model, &extended_model,
                          &stepping);

      get_extended_indices (cpu_features);

      update_usable (cpu_features);

      if (family == 0x06)
        {
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->preferred[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;

            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */

            case 0x7a:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont Plus.  */

            case 0x5c:
            case 0x5f:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont.  */

            case 0x4c:
            case 0x5a:
            case 0x75:
              /* Airmont is a die shrink of Silvermont.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5d:
              /* Unaligned load versions are faster than SSSE3
                 on Silvermont.  */
              cpu_features->preferred[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            case 0x86:
            case 0x96:
            case 0x9c:
              /* Enable rep string instructions, unaligned load, unaligned
                 copy, pminub and avoid SSE 4.2 on Tremont.  */
              cpu_features->preferred[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assuming this is one
                 of Core i3/i5/i7 processors if AVX is available.  */
              if (!CPU_FEATURES_CPU_P (cpu_features, AVX))
                break;
              /* Fall through.  */

            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
              cpu_features->preferred[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;
            }

          /* Disable TSX on some Haswell processors to avoid TSX on kernels
             that weren't updated with the latest microcode package (which
             disables the broken feature by default).  */
          switch (model)
            {
            case 0x3f:
              /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
              if (stepping >= 4)
                break;
              /* Fall through.  */
            case 0x3c:
            case 0x45:
            case 0x46:
              /* Disable Intel TSX on Haswell processors (except Xeon E7 v3
                 with stepping >= 4) to avoid TSX on kernels that weren't
                 updated with the latest microcode package (which disables
                 the broken feature by default).  */
              CPU_FEATURE_UNSET (cpu_features, RTM);
              break;
            }
        }

      /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
         if AVX512ER is available.  Don't use AVX512 to avoid lower CPU
         frequency if AVX512ER isn't available.  */
      if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
        cpu_features->preferred[index_arch_Prefer_No_VZEROUPPER]
          |= bit_arch_Prefer_No_VZEROUPPER;
      else
        cpu_features->preferred[index_arch_Prefer_No_AVX512]
          |= bit_arch_Prefer_No_AVX512;
    }
  /* This spells out "AuthenticAMD" or "HygonGenuine".  */
  else if ((ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
           || (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e))
    {
      unsigned int extended_model;

      kind = arch_kind_amd;

      get_common_indices (cpu_features, &family, &model, &extended_model,
                          &stepping);

      get_extended_indices (cpu_features);

      update_usable (cpu_features);

      ecx = cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ecx;

      if (CPU_FEATURE_USABLE_P (cpu_features, AVX))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          CPU_FEATURE_SET_USABLE (cpu_features, FMA4);
        }

      if (family == 0x15)
        {
          /* "Excavator"   */
          if (model >= 0x60 && model <= 0x7f)
            {
              cpu_features->preferred[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Copy_Backward);

              /* Unaligned AVX loads are slower.  */
              cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
                &= ~bit_arch_AVX_Fast_Unaligned_Load;
            }
        }
    }
  /* This spells out "CentaurHauls" or "  Shanghai  ".  */
  else if ((ebx == 0x746e6543 && ecx == 0x736c7561 && edx == 0x48727561)
           || (ebx == 0x68532020 && ecx == 0x20206961 && edx == 0x68676e61))
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_zhaoxin;

      get_common_indices (cpu_features, &family, &model, &extended_model,
                          &stepping);

      get_extended_indices (cpu_features);

      update_usable (cpu_features);

      model += extended_model;
      if (family == 0x6)
        {
          if (model == 0xf || model == 0x19)
            {
              CPU_FEATURE_UNSET (cpu_features, AVX);
              CPU_FEATURE_UNSET (cpu_features, AVX2);

              cpu_features->preferred[index_arch_Slow_SSE4_2]
                |= bit_arch_Slow_SSE4_2;

              cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
                &= ~bit_arch_AVX_Fast_Unaligned_Load;
            }
        }
      else if (family == 0x7)
        {
          if (model == 0x1b)
            {
              CPU_FEATURE_UNSET (cpu_features, AVX);
              CPU_FEATURE_UNSET (cpu_features, AVX2);

              cpu_features->preferred[index_arch_Slow_SSE4_2]
                |= bit_arch_Slow_SSE4_2;

              cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
                &= ~bit_arch_AVX_Fast_Unaligned_Load;
            }
          else if (model == 0x3b)
            {
              CPU_FEATURE_UNSET (cpu_features, AVX);
              CPU_FEATURE_UNSET (cpu_features, AVX2);

              cpu_features->preferred[index_arch_AVX_Fast_Unaligned_Load]
                &= ~bit_arch_AVX_Fast_Unaligned_Load;
            }
        }
    }
  else
    {
      kind = arch_kind_other;
      get_common_indices (cpu_features, NULL, NULL, NULL, NULL);
      update_usable (cpu_features);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->preferred[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->preferred[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->basic.kind = kind;
  cpu_features->basic.family = family;
  cpu_features->basic.model = model;
  cpu_features->basic.stepping = stepping;

#if HAVE_TUNABLES
  TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
  cpu_features->non_temporal_threshold
    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
  cpu_features->rep_movsb_threshold
    = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
  cpu_features->rep_stosb_threshold
    = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
  cpu_features->data_cache_size
    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  cpu_features->shared_cache_size
    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
#endif
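
  /* Each of the glibc.cpu.* thresholds above can be overridden at
     process startup via the tunables environment variable, e.g.
     GLIBC_TUNABLES=glibc.cpu.x86_rep_movsb_threshold=4096.  */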

  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  */
#if !HAVE_TUNABLES && defined SHARED
  /* The glibc.cpu.hwcap_mask tunable is initialized already, so no need to do
     this.  */
  GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
#endif

#ifdef __x86_64__
  GLRO(dl_hwcap) = HWCAP_X86_64;
  if (cpu_features->basic.kind == arch_kind_intel)
    {
      const char *platform = NULL;

      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512CD))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, AVX512ER))
            {
              if (CPU_FEATURE_USABLE_P (cpu_features, AVX512PF))
                platform = "xeon_phi";
            }
          else
            {
              if (CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
                  && CPU_FEATURE_USABLE_P (cpu_features, AVX512DQ)
                  && CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
                GLRO(dl_hwcap) |= HWCAP_X86_AVX512_1;
            }
        }

      if (platform == NULL
          && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
          && CPU_FEATURE_USABLE_P (cpu_features, FMA)
          && CPU_FEATURE_USABLE_P (cpu_features, BMI1)
          && CPU_FEATURE_USABLE_P (cpu_features, BMI2)
          && CPU_FEATURE_USABLE_P (cpu_features, LZCNT)
          && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
          && CPU_FEATURE_USABLE_P (cpu_features, POPCNT))
        platform = "haswell";

      if (platform != NULL)
        GLRO(dl_platform) = platform;
    }
#else
  GLRO(dl_hwcap) = 0;
  if (CPU_FEATURE_USABLE_P (cpu_features, SSE2))
    GLRO(dl_hwcap) |= HWCAP_X86_SSE2;

  if (CPU_FEATURES_ARCH_P (cpu_features, I686))
    GLRO(dl_platform) = "i686";
  else if (CPU_FEATURES_ARCH_P (cpu_features, I586))
    GLRO(dl_platform) = "i586";
#endif

#if CET_ENABLED
# if HAVE_TUNABLES
  TUNABLE_GET (x86_ibt, tunable_val_t *,
               TUNABLE_CALLBACK (set_x86_ibt));
  TUNABLE_GET (x86_shstk, tunable_val_t *,
               TUNABLE_CALLBACK (set_x86_shstk));
# endif

  /* Check CET status.  */
  unsigned int cet_status = get_cet_status ();

  if (cet_status)
    {
      GL(dl_x86_feature_1) = cet_status;
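
      /* cet_status is a mask of GNU_PROPERTY_X86_FEATURE_1_IBT
         (indirect branch tracking) and GNU_PROPERTY_X86_FEATURE_1_SHSTK
         (shadow stack), reporting what the kernel has enabled for this
         process.  */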

# ifndef SHARED
      /* Check if IBT and SHSTK are enabled by kernel.  */
      if ((cet_status & GNU_PROPERTY_X86_FEATURE_1_IBT)
          || (cet_status & GNU_PROPERTY_X86_FEATURE_1_SHSTK))
        {
          /* Disable IBT and/or SHSTK if they are enabled by kernel, but
             disabled by environment variable:

             GLIBC_TUNABLES=glibc.cpu.hwcaps=-IBT,-SHSTK
           */
          unsigned int cet_feature = 0;
          if (!HAS_CPU_FEATURE (IBT))
            cet_feature |= GNU_PROPERTY_X86_FEATURE_1_IBT;
          if (!HAS_CPU_FEATURE (SHSTK))
            cet_feature |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;

          if (cet_feature)
            {
              int res = dl_cet_disable_cet (cet_feature);

              /* Clear the disabled bits in dl_x86_feature_1.  */
              if (res == 0)
                GL(dl_x86_feature_1) &= ~cet_feature;
            }

          /* Lock CET if IBT or SHSTK is enabled in executable.  Don't
             lock CET if IBT or SHSTK is enabled permissively.  */
          if (GL(dl_x86_feature_control).ibt != cet_permissive
              && GL(dl_x86_feature_control).shstk != cet_permissive)
            dl_cet_lock_cet ();
        }
# endif
    }
#endif
}