/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2018 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <cpuid.h>
#include <cpu-features.h>
#include <dl-hwcap.h>
#include <libc-pointer-arith.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE cpu
# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
# include <elf/dl-tunables.h>

extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
  attribute_hidden;

# if CET_ENABLED
extern void TUNABLE_CALLBACK (set_x86_ibt) (tunable_val_t *)
  attribute_hidden;
extern void TUNABLE_CALLBACK (set_x86_shstk) (tunable_val_t *)
  attribute_hidden;
# endif
#endif

#if CET_ENABLED
# include <dl-cet.h>
# include <cet-tunables.h>
#endif
static void
get_extended_indices (struct cpu_features *cpu_features)
{
  unsigned int eax, ebx, ecx, edx;
  __cpuid (0x80000000, eax, ebx, ecx, edx);
  if (eax >= 0x80000001)
    __cpuid (0x80000001,
	     cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
	     cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
	     cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
	     cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);
}

static void
get_common_indices (struct cpu_features *cpu_features,
		    unsigned int *family, unsigned int *model,
		    unsigned int *extended_model, unsigned int *stepping)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
	       cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
	       cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      *stepping = eax & 0x0f;
      if (*family == 0x0f)
	{
	  *family += (eax >> 20) & 0xff;
	  *model += *extended_model;
	}
    }
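
  /* Added note (illustrative, not in the original source): a Skylake
     client part reports eax == 0x000506e3 in leaf 1.  The shifts above
     decode that as family 0x06, model 0x0e, extended model 0x50 and
     stepping 0x03; the Intel branch of init_cpu_features later adds the
     extended model in, yielding the commonly quoted model 0x5e.  */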

  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
		   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
		   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
		   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
		   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);

  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
      /* Are the YMM and XMM states usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state)) ==
	  (bit_YMM_state | bit_XMM_state))
	{
	  /* Determine if AVX is usable.  */
	  if (CPU_FEATURES_CPU_P (cpu_features, AVX))
	    {
	      cpu_features->feature[index_arch_AVX_Usable]
		|= bit_arch_AVX_Usable;
	      /* The following features depend on AVX being usable.  */
	      /* Determine if AVX2 is usable.  */
	      if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
		{
		  cpu_features->feature[index_arch_AVX2_Usable]
		    |= bit_arch_AVX2_Usable;

		  /* Unaligned loads with 256-bit AVX registers are faster
		     on Intel/AMD processors with AVX2.  */
		  cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
		    |= bit_arch_AVX_Fast_Unaligned_Load;
		}
	      /* Determine if FMA is usable.  */
	      if (CPU_FEATURES_CPU_P (cpu_features, FMA))
		cpu_features->feature[index_arch_FMA_Usable]
		  |= bit_arch_FMA_Usable;
	    }

	  /* Check if the Opmask state, the upper 256 bits of ZMM0-ZMM15
	     and the ZMM16-ZMM31 state are enabled.  */
	  if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
			 | bit_ZMM16_31_state)) ==
	      (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
	    {
	      /* Determine if AVX512F is usable.  */
	      if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
		{
		  cpu_features->feature[index_arch_AVX512F_Usable]
		    |= bit_arch_AVX512F_Usable;
		  /* Determine if AVX512DQ is usable.  */
		  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
		    cpu_features->feature[index_arch_AVX512DQ_Usable]
		      |= bit_arch_AVX512DQ_Usable;
		}
	    }
	}
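
      /* Added note: the *_Usable bits above intentionally combine two
	 independent facts: the CPUID feature flag (the hardware
	 implements the instructions) and the XCR0 state bits (the
	 kernel context-switches the registers).  Only the combination
	 makes an extension safe for glibc to select at runtime.  */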

      /* For _dl_runtime_resolve, set xsave_state_size to the xsave area
	 size plus the integer register save size, aligned to 64 bytes.  */
      if (cpu_features->max_cpuid >= 0xd)
	{
	  unsigned int eax, ebx, ecx, edx;

	  __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
	  if (ebx != 0)
	    {
	      unsigned int xsave_state_full_size
		= ALIGN_UP (ebx + STATE_SAVE_OFFSET, 64);

	      cpu_features->xsave_state_size
		= xsave_state_full_size;
	      cpu_features->xsave_state_full_size
		= xsave_state_full_size;

	      __cpuid_count (0xd, 1, eax, ebx, ecx, edx);

	      /* Check if XSAVEC is available.  */
	      if ((eax & (1 << 1)) != 0)
		{
		  unsigned int xstate_comp_offsets[32];
		  unsigned int xstate_comp_sizes[32];
		  unsigned int i;

		  xstate_comp_offsets[0] = 0;
		  xstate_comp_offsets[1] = 160;
		  xstate_comp_offsets[2] = 576;
		  xstate_comp_sizes[0] = 160;
		  xstate_comp_sizes[1] = 256;

		  for (i = 2; i < 32; i++)
		    {
		      if ((STATE_SAVE_MASK & (1 << i)) != 0)
			{
			  __cpuid_count (0xd, i, eax, ebx, ecx, edx);
			  xstate_comp_sizes[i] = eax;
			}
		      else
			{
			  ecx = 0;
			  xstate_comp_sizes[i] = 0;
			}

		      if (i > 2)
			{
			  xstate_comp_offsets[i]
			    = (xstate_comp_offsets[i - 1]
			       + xstate_comp_sizes[i - 1]);
			  if ((ecx & (1 << 1)) != 0)
			    xstate_comp_offsets[i]
			      = ALIGN_UP (xstate_comp_offsets[i], 64);
			}
		    }

		  /* Use XSAVEC.  */
		  unsigned int size
		    = xstate_comp_offsets[31] + xstate_comp_sizes[31];
		  if (size)
		    {
		      cpu_features->xsave_state_size
			= ALIGN_UP (size + STATE_SAVE_OFFSET, 64);
		      cpu_features->feature[index_arch_XSAVEC_Usable]
			|= bit_arch_XSAVEC_Usable;
		    }
		}
	    }
	}
    }
}
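
/* Added note on the XSAVEC sizing above: in the compacted format the
   legacy x87/SSE area plus the XSAVE header occupy the first 576 bytes,
   and each remaining enabled component follows at the next free offset,
   rounded up to 64 bytes whenever CPUID leaf (0xd, i) sets bit 1 of ecx.
   The loop mirrors that layout to find the worst-case buffer size that
   _dl_runtime_resolve must reserve.  */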

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif

  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);
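
  /* Added note: leaf 0 returns the vendor string split across ebx, edx
     and ecx, in that order and in little-endian byte order, so
     "GenuineIntel" arrives as ebx == "Genu", edx == "ineI" and
     ecx == "ntel"; the magic constants below are those chunks.  */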
  /* This spells out "GenuineIntel".  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_intel;

      get_common_indices (cpu_features, &family, &model, &extended_model,
			  &stepping);

      get_extended_indices (cpu_features);

      if (family == 0x06)
	{
	  model += extended_model;
	  switch (model)
	    {
	    case 0x1c:
	    case 0x26:
	      /* BSF is slow on Atom.  */
	      cpu_features->feature[index_arch_Slow_BSF]
		|= bit_arch_Slow_BSF;
	      break;

	    case 0x57:
	      /* Knights Landing.  Enable Silvermont optimizations.  */

	    case 0x5c:
	    case 0x5f:
	      /* Unaligned load versions are faster than SSSE3
		 on Goldmont.  */

	    case 0x4c:
	      /* Airmont is a die shrink of Silvermont.  */

	    case 0x37:
	    case 0x4a:
	    case 0x4d:
	    case 0x5a:
	    case 0x5d:
	      /* Unaligned load versions are faster than SSSE3
		 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
	      cpu_features->feature[index_arch_Fast_Unaligned_Load]
		|= (bit_arch_Fast_Unaligned_Load
		    | bit_arch_Fast_Unaligned_Copy
		    | bit_arch_Prefer_PMINUB_for_stringop
		    | bit_arch_Slow_SSE4_2);
	      break;

	    default:
	      /* Unknown family 0x06 processors.  Assume this is one of
		 the Core i3/i5/i7 processors if AVX is available.  */
	      if (!CPU_FEATURES_CPU_P (cpu_features, AVX))
		break;

	    case 0x1a:
	    case 0x1e:
	    case 0x1f:
	    case 0x25:
	    case 0x2c:
	    case 0x2e:
	    case 0x2f:
	      /* Rep string instructions, unaligned load, unaligned copy,
		 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
	      cpu_features->feature[index_arch_Fast_Rep_String]
		|= (bit_arch_Fast_Rep_String
		    | bit_arch_Fast_Unaligned_Load
		    | bit_arch_Fast_Unaligned_Copy
		    | bit_arch_Prefer_PMINUB_for_stringop);
	      break;
	    }

	  /* Disable TSX on some Haswell processors to avoid TSX on kernels
	     that weren't updated with the latest microcode package (which
	     disables the broken feature by default).  */
	  switch (model)
	    {
	    case 0x3f:
	      /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
	      if (stepping >= 4)
		break;
	    case 0x3c:
	    case 0x45:
	    case 0x46:
	      /* Disable Intel TSX on Haswell processors (except Xeon E7 v3
		 with stepping >= 4) to avoid TSX on kernels that weren't
		 updated with the latest microcode package (which disables
		 the broken feature by default).  */
	      cpu_features->cpuid[index_cpu_RTM].reg_RTM &= ~bit_cpu_RTM;
	      break;
	    }
	}
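
      /* Added note: clearing bit_cpu_RTM in the cached CPUID word means
	 HAS_CPU_FEATURE (RTM) reports false from here on, so code such
	 as the pthread lock-elision setup never enables TSX paths on
	 these steppings.  */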

      /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
	 if AVX512ER is available.  Don't use AVX512 to avoid lower CPU
	 frequency if AVX512ER isn't available.  */
      if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
	cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
	  |= bit_arch_Prefer_No_VZEROUPPER;
      else
	cpu_features->feature[index_arch_Prefer_No_AVX512]
	  |= bit_arch_Prefer_No_AVX512;
    }
  /* This spells out "AuthenticAMD".  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_amd;

      get_common_indices (cpu_features, &family, &model, &extended_model,
			  &stepping);

      get_extended_indices (cpu_features);

      ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;

      if (HAS_ARCH_FEATURE (AVX_Usable))
	{
	  /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
	     FMA4 requires AVX, determine if FMA4 is usable here.  */
	  if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
	    cpu_features->feature[index_arch_FMA4_Usable]
	      |= bit_arch_FMA4_Usable;
	}

      if (family == 0x15)
	{
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
	  /* "Excavator"  */
	  if (model >= 0x60 && model <= 0x7f)
	    {
	      cpu_features->feature[index_arch_Fast_Unaligned_Load]
		|= (bit_arch_Fast_Unaligned_Load
		    | bit_arch_Fast_Copy_Backward);

	      /* Unaligned AVX loads are slower.  */
	      cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
		&= ~bit_arch_AVX_Fast_Unaligned_Load;
	    }
	}
    }
  else
    {
      kind = arch_kind_other;
      get_common_indices (cpu_features, NULL, NULL, NULL, NULL);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;

#if HAVE_TUNABLES
  TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
  cpu_features->non_temporal_threshold
    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
  cpu_features->data_cache_size
    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  cpu_features->shared_cache_size
    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
#endif
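
  /* Added usage note: these are set from the GLIBC_TUNABLES environment
     variable, e.g. (illustrative value)

       GLIBC_TUNABLES=glibc.cpu.x86_non_temporal_threshold=1000000

     which overrides the size above which the memcpy family switches to
     non-temporal stores.  */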

  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  */
#if !HAVE_TUNABLES && defined SHARED
  /* The glibc.cpu.hwcap_mask tunable is initialized already, so no need
     to do this.  */
  GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
#endif

#ifdef __x86_64__
  GLRO(dl_hwcap) = HWCAP_X86_64;
  if (cpu_features->kind == arch_kind_intel)
    {
      const char *platform = NULL;

      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
	  && CPU_FEATURES_CPU_P (cpu_features, AVX512CD))
	{
	  if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
	    {
	      if (CPU_FEATURES_CPU_P (cpu_features, AVX512PF))
		platform = "xeon_phi";
	    }
	  else
	    {
	      if (CPU_FEATURES_CPU_P (cpu_features, AVX512BW)
		  && CPU_FEATURES_CPU_P (cpu_features, AVX512DQ)
		  && CPU_FEATURES_CPU_P (cpu_features, AVX512VL))
		GLRO(dl_hwcap) |= HWCAP_X86_AVX512_1;
	    }
	}

      if (platform == NULL
	  && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
	  && CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
	  && CPU_FEATURES_CPU_P (cpu_features, BMI1)
	  && CPU_FEATURES_CPU_P (cpu_features, BMI2)
	  && CPU_FEATURES_CPU_P (cpu_features, LZCNT)
	  && CPU_FEATURES_CPU_P (cpu_features, MOVBE)
	  && CPU_FEATURES_CPU_P (cpu_features, POPCNT))
	platform = "haswell";

      if (platform != NULL)
	GLRO(dl_platform) = platform;
    }
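
  /* Added usage note: dl_platform feeds the $PLATFORM expansion in the
     dynamic linker's library search paths, so a directory such as
     /usr/lib64/haswell (illustrative path) can provide alternative
     shared objects that are only used on matching CPUs.  */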
#else
  GLRO(dl_hwcap) = 0;
  if (CPU_FEATURES_CPU_P (cpu_features, SSE2))
    GLRO(dl_hwcap) |= HWCAP_X86_SSE2;

  if (CPU_FEATURES_ARCH_P (cpu_features, I686))
    GLRO(dl_platform) = "i686";
  else if (CPU_FEATURES_ARCH_P (cpu_features, I586))
    GLRO(dl_platform) = "i586";
#endif

#if CET_ENABLED
# if HAVE_TUNABLES
  TUNABLE_GET (x86_ibt, tunable_val_t *,
	       TUNABLE_CALLBACK (set_x86_ibt));
  TUNABLE_GET (x86_shstk, tunable_val_t *,
	       TUNABLE_CALLBACK (set_x86_shstk));
# endif

  /* Check CET status.  */
  unsigned int cet_status = get_cet_status ();

  if (cet_status)
    {
      GL(dl_x86_feature_1)[0] = cet_status;

# ifndef SHARED
      /* Check if IBT and SHSTK are enabled by the kernel.  */
      if ((cet_status & GNU_PROPERTY_X86_FEATURE_1_IBT)
	  || (cet_status & GNU_PROPERTY_X86_FEATURE_1_SHSTK))
	{
	  /* Disable IBT and/or SHSTK if they are enabled by the kernel,
	     but disabled by the environment variable:

	     GLIBC_TUNABLES=glibc.cpu.hwcaps=-IBT,-SHSTK
	   */
	  unsigned int cet_feature = 0;
	  if (!HAS_CPU_FEATURE (IBT))
	    cet_feature |= GNU_PROPERTY_X86_FEATURE_1_IBT;
	  if (!HAS_CPU_FEATURE (SHSTK))
	    cet_feature |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;

	  if (cet_feature)
	    {
	      int res = dl_cet_disable_cet (cet_feature);

	      /* Clear the disabled bits in dl_x86_feature_1.  */
	      if (res == 0)
		GL(dl_x86_feature_1)[0] &= ~cet_feature;
	    }

	  /* Lock CET if IBT or SHSTK is enabled in the executable.  Don't
	     lock CET if SHSTK is enabled permissively.  */
	  if (((GL(dl_x86_feature_1)[1] >> CET_MAX)
	       & ((1 << CET_MAX) - 1))
	      != CET_PERMISSIVE)
	    dl_cet_lock_cet ();
	}
# endif
    }
#endif
}
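
/* Added note on the CET logic above: locking prevents the process from
   later disabling IBT or SHSTK; per the comments in this file, glibc
   deliberately skips the lock when SHSTK is enabled only permissively,
   so that loading a legacy object without CET markings can still turn
   the feature off.  */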