/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2018 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <cpuid.h>
#include <cpu-features.h>
#include <dl-hwcap.h>
#include <libc-pointer-arith.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE cpu
# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
# include <elf/dl-tunables.h>

extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
  attribute_hidden;

# if CET_ENABLED
extern void TUNABLE_CALLBACK (set_x86_ibt) (tunable_val_t *)
  attribute_hidden;
extern void TUNABLE_CALLBACK (set_x86_shstk) (tunable_val_t *)
  attribute_hidden;
# endif
#endif

#if CET_ENABLED
# include <dl-cet.h>
# include <cet-tunables.h>
#endif

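/* Descriptive note: the helper below reads the highest supported extended
   CPUID leaf and, if leaf 0x80000001 exists, caches its register values.
   Later checks (for example the FMA4 and LZCNT tests further down) read
   their feature bits out of this cached COMMON_CPUID_INDEX_80000001
   entry.  */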
static void
get_extended_indices (struct cpu_features *cpu_features)
{
  unsigned int eax, ebx, ecx, edx;
  __cpuid (0x80000000, eax, ebx, ecx, edx);
  if (eax >= 0x80000001)
    __cpuid (0x80000001,
             cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
             cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
             cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
             cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);
}

static void
get_common_indices (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model, unsigned int *stepping)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      *stepping = eax & 0x0f;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
    }
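
  /* A worked example of the decoding above, using a hypothetical leaf-1
     EAX value of 0x000506e3: family = (eax >> 8) & 0x0f = 0x6,
     model = 0xe, extended_model = 0x50 and stepping = 0x3.  Since the
     family is not 0x0f, the extended fields are not folded in here; the
     family == 0x06 handling in init_cpu_features below adds
     extended_model to model, yielding 0x5e.  */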

  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);

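  /* Note that a CPU feature bit such as AVX only says the instructions
     are implemented.  Whether the registers can actually be used also
     depends on the OS having enabled the matching XSAVE state components
     in XCR0, which is what the XGETBV checks below verify before any of
     the *_Usable bits are set.  */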
  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
      /* Is YMM and XMM state usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state)) ==
          (bit_YMM_state | bit_XMM_state))
        {
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            {
              cpu_features->feature[index_arch_AVX_Usable]
                |= bit_arch_AVX_Usable;
              /* The following features depend on AVX being usable.  */
              /* Determine if AVX2 is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
                {
                  cpu_features->feature[index_arch_AVX2_Usable]
                    |= bit_arch_AVX2_Usable;

                  /* Unaligned loads with 256-bit AVX registers are faster
                     on Intel/AMD processors with AVX2.  */
                  cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
                    |= bit_arch_AVX_Fast_Unaligned_Load;
                }
              /* Determine if FMA is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, FMA))
                cpu_features->feature[index_arch_FMA_Usable]
                  |= bit_arch_FMA_Usable;
            }

          /* Check if OPMASK state, upper 256-bit of ZMM0-ZMM15 and
             ZMM16-ZMM31 state are enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state)) ==
              (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  cpu_features->feature[index_arch_AVX512F_Usable]
                    |= bit_arch_AVX512F_Usable;
                  /* Determine if AVX512DQ is usable.  */
                  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
                    cpu_features->feature[index_arch_AVX512DQ_Usable]
                      |= bit_arch_AVX512DQ_Usable;
                }
            }
        }

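      /* Layout background for the computation below (the run-time values
         come from CPUID leaf 0xd; the constants match the Intel SDM):
         the legacy FXSAVE region is 512 bytes and the XSAVE header is 64
         bytes, so component 2 (AVX) starts at offset 576 in the standard
         format, while SSE state sits at offset 160 inside the legacy
         region.  STATE_SAVE_OFFSET adds room for the integer registers
         that _dl_runtime_resolve saves in front of the XSAVE area.  */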
      /* For _dl_runtime_resolve, set xsave_state_size to xsave area
         size + integer register save size and align it to 64 bytes.  */
      if (cpu_features->max_cpuid >= 0xd)
        {
          unsigned int eax, ebx, ecx, edx;

          __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
          if (ebx != 0)
            {
              unsigned int xsave_state_full_size
                = ALIGN_UP (ebx + STATE_SAVE_OFFSET, 64);

              cpu_features->xsave_state_size
                = xsave_state_full_size;
              cpu_features->xsave_state_full_size
                = xsave_state_full_size;

              __cpuid_count (0xd, 1, eax, ebx, ecx, edx);

              /* Check if XSAVEC is available.  */
              if ((eax & (1 << 1)) != 0)
                {
                  unsigned int xstate_comp_offsets[32];
                  unsigned int xstate_comp_sizes[32];
                  unsigned int i;

                  xstate_comp_offsets[0] = 0;
                  xstate_comp_offsets[1] = 160;
                  xstate_comp_offsets[2] = 576;
                  xstate_comp_sizes[0] = 160;
                  xstate_comp_sizes[1] = 256;

                  for (i = 2; i < 32; i++)
                    {
                      if ((STATE_SAVE_MASK & (1 << i)) != 0)
                        {
                          __cpuid_count (0xd, i, eax, ebx, ecx, edx);
                          xstate_comp_sizes[i] = eax;
                        }
                      else
                        {
                          ecx = 0;
                          xstate_comp_sizes[i] = 0;
                        }

                      if (i > 2)
                        {
                          xstate_comp_offsets[i]
                            = (xstate_comp_offsets[i - 1]
                               + xstate_comp_sizes[i - 1]);
                          if ((ecx & (1 << 1)) != 0)
                            xstate_comp_offsets[i]
                              = ALIGN_UP (xstate_comp_offsets[i], 64);
                        }
                    }

                  /* Use XSAVEC.  */
                  unsigned int size
                    = xstate_comp_offsets[31] + xstate_comp_sizes[31];
                  if (size)
                    {
                      cpu_features->xsave_state_size
                        = ALIGN_UP (size + STATE_SAVE_OFFSET, 64);
                      cpu_features->feature[index_arch_XSAVEC_Usable]
                        |= bit_arch_XSAVEC_Usable;
                    }
                }
            }
        }
    }
}

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif
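  /* HAS_CPUID is expected to be 0 only for 32-bit builds targeting
     processors old enough that the CPUID instruction itself may be
     missing, hence the run-time __get_cpuid_max probe; 64-bit and
     i686-class builds compile this check away.  */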

  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);

  /* This spells out "GenuineIntel".  */
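  /* The registers hold the ASCII bytes of the 12-character vendor string
     in little-endian order: EBX = "Genu" (0x756e6547), EDX = "ineI"
     (0x49656e69) and ECX = "ntel" (0x6c65746e).  The "AuthenticAMD"
     test below decodes the same way.  */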
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_intel;

      get_common_indices (cpu_features, &family, &model, &extended_model,
                          &stepping);

      get_extended_indices (cpu_features);

      if (family == 0x06)
        {
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->feature[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;

            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */

            case 0x5c:
            case 0x5f:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont.  */

            case 0x4c:
              /* Airmont is a die shrink of Silvermont.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5a:
            case 0x5d:
              /* Unaligned load versions are faster than SSSE3
                 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assuming this is one
                 of Core i3/i5/i7 processors if AVX is available.  */
              if (!CPU_FEATURES_CPU_P (cpu_features, AVX))
                break;

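            /* Note that when AVX is available the default: label above
               deliberately falls through into the case labels below, so
               unknown family 0x06 parts with AVX get the same tuning as
               Core i3/i5/i7.  */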
            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;
            }

          /* Disable TSX on some Haswell processors to avoid TSX on kernels
             that weren't updated with the latest microcode package (which
             disables the broken feature by default).  */
          switch (model)
            {
            case 0x3f:
              /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
              if (stepping >= 4)
                break;
              /* Fall through.  */
            case 0x3c:
            case 0x45:
            case 0x46:
              /* Disable Intel TSX on Haswell processors (except Xeon E7 v3
                 with stepping >= 4) to avoid TSX on kernels that weren't
                 updated with the latest microcode package (which disables
                 the broken feature by default).  */
              cpu_features->cpuid[index_cpu_RTM].reg_RTM &= ~bit_cpu_RTM;
              break;
            }
        }

      /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
         if AVX512ER is available.  Don't use AVX512 to avoid lower CPU
         frequency if AVX512ER isn't available.  */
      if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
        cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
          |= bit_arch_Prefer_No_VZEROUPPER;
      else
        cpu_features->feature[index_arch_Prefer_No_AVX512]
          |= bit_arch_Prefer_No_AVX512;
    }
  /* This spells out "AuthenticAMD".  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_amd;

      get_common_indices (cpu_features, &family, &model, &extended_model,
                          &stepping);

      get_extended_indices (cpu_features);

      ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;

      if (HAS_ARCH_FEATURE (AVX_Usable))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
            cpu_features->feature[index_arch_FMA4_Usable]
              |= bit_arch_FMA4_Usable;
        }

      if (family == 0x15)
        {
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
          /* "Excavator"  */
          if (model >= 0x60 && model <= 0x7f)
            {
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Copy_Backward);

              /* Unaligned AVX loads are slower.  */
              cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
                &= ~bit_arch_AVX_Fast_Unaligned_Load;
            }
        }
    }
  else
    {
      kind = arch_kind_other;
      get_common_indices (cpu_features, NULL, NULL, NULL, NULL);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;

#if HAVE_TUNABLES
  TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
  cpu_features->non_temporal_threshold
    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
  cpu_features->data_cache_size
    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  cpu_features->shared_cache_size
    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
#endif
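  /* For example (assuming the glibc.cpu tunable namespace declared at the
     top of this file), running with
       GLIBC_TUNABLES=glibc.cpu.x86_data_cache_size=65536
     would override the value otherwise derived from the hardware; the
     65536 here is purely an illustrative number.  */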

  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  */
#if !HAVE_TUNABLES && defined SHARED
  /* The glibc.cpu.hwcap_mask tunable is initialized already, so no need to do
     this.  */
  GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
#endif

#ifdef __x86_64__
  GLRO(dl_hwcap) = HWCAP_X86_64;
  if (cpu_features->kind == arch_kind_intel)
    {
      const char *platform = NULL;

      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
          && CPU_FEATURES_CPU_P (cpu_features, AVX512CD))
        {
          if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
            {
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512PF))
                platform = "xeon_phi";
            }
          else
            {
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512BW)
                  && CPU_FEATURES_CPU_P (cpu_features, AVX512DQ)
                  && CPU_FEATURES_CPU_P (cpu_features, AVX512VL))
                GLRO(dl_hwcap) |= HWCAP_X86_AVX512_1;
            }
        }

      if (platform == NULL
          && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
          && CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
          && CPU_FEATURES_CPU_P (cpu_features, BMI1)
          && CPU_FEATURES_CPU_P (cpu_features, BMI2)
          && CPU_FEATURES_CPU_P (cpu_features, LZCNT)
          && CPU_FEATURES_CPU_P (cpu_features, MOVBE)
          && CPU_FEATURES_CPU_P (cpu_features, POPCNT))
        platform = "haswell";

      if (platform != NULL)
        GLRO(dl_platform) = platform;
    }
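  /* Note: the "haswell" and "xeon_phi" strings become GLRO(dl_platform),
     which the dynamic loader uses as a platform subdirectory name when
     searching for optimized shared objects, just as "i686"/"i586" are
     used on 32-bit configurations below.  */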
#else
  GLRO(dl_hwcap) = 0;
  if (CPU_FEATURES_CPU_P (cpu_features, SSE2))
    GLRO(dl_hwcap) |= HWCAP_X86_SSE2;

  if (CPU_FEATURES_ARCH_P (cpu_features, I686))
    GLRO(dl_platform) = "i686";
  else if (CPU_FEATURES_ARCH_P (cpu_features, I586))
    GLRO(dl_platform) = "i586";
#endif

#if CET_ENABLED
# if HAVE_TUNABLES
  TUNABLE_GET (x86_ibt, tunable_val_t *,
               TUNABLE_CALLBACK (set_x86_ibt));
  TUNABLE_GET (x86_shstk, tunable_val_t *,
               TUNABLE_CALLBACK (set_x86_shstk));
# endif

  /* Check CET status.  */
  unsigned int cet_status = get_cet_status ();

  if (cet_status)
    {
      GL(dl_x86_feature_1)[0] = cet_status;

# ifndef SHARED
      /* Check if IBT and SHSTK are enabled by the kernel.  */
      if ((cet_status & GNU_PROPERTY_X86_FEATURE_1_IBT)
          || (cet_status & GNU_PROPERTY_X86_FEATURE_1_SHSTK))
        {
          /* Disable IBT and/or SHSTK if they are enabled by the kernel,
             but disabled by an environment variable:

             GLIBC_TUNABLES=glibc.cpu.hwcaps=-IBT,-SHSTK
           */
          unsigned int cet_feature = 0;
          if (!HAS_CPU_FEATURE (IBT))
            cet_feature |= GNU_PROPERTY_X86_FEATURE_1_IBT;
          if (!HAS_CPU_FEATURE (SHSTK))
            cet_feature |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;

          if (cet_feature)
            {
              int res = dl_cet_disable_cet (cet_feature);

              /* Clear the disabled bits in dl_x86_feature_1.  */
              if (res == 0)
                GL(dl_x86_feature_1)[0] &= ~cet_feature;
            }

          /* Lock CET if IBT or SHSTK is enabled in the executable.
             Don't lock CET if SHSTK is enabled permissively.  */
          if (((GL(dl_x86_feature_1)[1] >> CET_MAX)
               & ((1 << CET_MAX) - 1))
              != CET_PERMISSIVE)
            dl_cet_lock_cet ();
        }
# endif
    }
#endif
}