/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2017 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <cpu-features.h>
#include <cpuid.h>
#include <dl-hwcap.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE tune
# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
# include <elf/dl-tunables.h>

extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
  attribute_hidden;
#endif
static void
get_common_indeces (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model, unsigned int *stepping)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
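
      /* CPUID leaf 1 EAX packs the processor signature: stepping in bits
         3:0, model in bits 7:4, family in bits 11:8, extended model in
         bits 19:16 and extended family in bits 27:20.  */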
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      *stepping = eax & 0x0f;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
    }
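
  /* CPUID leaf 7 subleaf 0 carries the structured extended feature flags
     (AVX2, BMI1/BMI2, the AVX-512 family, and more); cache it whenever the
     processor reports that the leaf exists.  */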
  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);

  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
      /* Is YMM and XMM state usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state)) ==
          (bit_YMM_state | bit_XMM_state))
        {
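          /* XCR0, returned by XGETBV with ECX == 0, reports which register
             states the kernel saves and restores; a CPUID feature bit alone
             does not make AVX safe to use.  */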
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            {
              cpu_features->feature[index_arch_AVX_Usable]
                |= bit_arch_AVX_Usable;
              /* The following features depend on AVX being usable.  */
              /* Determine if AVX2 is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
                cpu_features->feature[index_arch_AVX2_Usable]
                  |= bit_arch_AVX2_Usable;
              /* Determine if FMA is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, FMA))
                cpu_features->feature[index_arch_FMA_Usable]
                  |= bit_arch_FMA_Usable;
            }

          /* Check if OPMASK state, upper 256-bit of ZMM0-ZMM15 and
             ZMM16-ZMM31 state are enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state)) ==
              (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  cpu_features->feature[index_arch_AVX512F_Usable]
                    |= bit_arch_AVX512F_Usable;
                  /* Determine if AVX512DQ is usable.  */
                  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
                    cpu_features->feature[index_arch_AVX512DQ_Usable]
                      |= bit_arch_AVX512DQ_Usable;
                }
            }
        }
    }
}

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }

  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);
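
  /* Leaf 0 returns the highest supported standard leaf in EAX and the
     vendor identification string split across EBX, EDX and ECX, which the
     checks below compare word by word.  */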

  /* This spells out "GenuineIntel".  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_intel;

      get_common_indeces (cpu_features, &family, &model, &extended_model,
                          &stepping);

      if (family == 0x06)
        {
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->feature[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;

            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */

            case 0x5c:
            case 0x5f:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont.  */

            case 0x4c:
              /* Airmont is a die shrink of Silvermont.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5a:
            case 0x5d:
              /* Unaligned load versions are faster than SSSE3
                 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assuming this is one
                 of Core i3/i5/i7 processors if AVX is available.  */
              if (!CPU_FEATURES_CPU_P (cpu_features, AVX))
                break;

            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;
            }

          /* Disable broken Intel TSX on affected Haswell models.  */
          switch (model)
            {
            case 0x3f:
              /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
              if (stepping >= 4)
                break;
            case 0x3c:
            case 0x45:
            case 0x46:
              /* Disable Intel TSX on Haswell processors (except Xeon E7 v3
                 with stepping >= 4) to avoid TSX on kernels that weren't
                 updated with the latest microcode package (which disables
                 the broken feature by default).  */
              cpu_features->cpuid[index_cpu_RTM].reg_RTM &= ~bit_cpu_RTM;
              break;
            }
        }

      /* Unaligned loads with 256-bit AVX registers are faster on
         Intel processors with AVX2.  */
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
        cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
          |= bit_arch_AVX_Fast_Unaligned_Load;

      /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
         if AVX512ER is available.  Don't use AVX512 to avoid lower CPU
         frequency if AVX512ER isn't available.  */
      if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
        cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
          |= bit_arch_Prefer_No_VZEROUPPER;
      else
        cpu_features->feature[index_arch_Prefer_No_AVX512]
          |= bit_arch_Prefer_No_AVX512;

      /* To avoid SSE transition penalty, use _dl_runtime_resolve_slow.
         If XGETBV supports ECX == 1, use _dl_runtime_resolve_opt.
         Use _dl_runtime_resolve_opt only with AVX512F since it is
         slower than _dl_runtime_resolve_slow with AVX.  */
      cpu_features->feature[index_arch_Use_dl_runtime_resolve_slow]
        |= bit_arch_Use_dl_runtime_resolve_slow;
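
      /* CPUID leaf 0xd subleaf 1, EAX bit 2 advertises XGETBV with
         ECX == 1, which _dl_runtime_resolve_opt relies on to query the
         state components currently in use rather than merely enabled.  */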
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
          && cpu_features->max_cpuid >= 0xd)
        {
          unsigned int eax;

          __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
          if ((eax & (1 << 2)) != 0)
            cpu_features->feature[index_arch_Use_dl_runtime_resolve_opt]
              |= bit_arch_Use_dl_runtime_resolve_opt;
        }
    }
  /* This spells out "AuthenticAMD".  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_amd;

      get_common_indeces (cpu_features, &family, &model, &extended_model,
                          &stepping);

      ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;

      unsigned int eax;
      __cpuid (0x80000000, eax, ebx, ecx, edx);
      if (eax >= 0x80000001)
        __cpuid (0x80000001,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);
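
      /* COMMON_CPUID_INDEX_80000001 holds the AMD extended feature flags
         (LZCNT, SSE4A, FMA4, XOP and friends), which is why this leaf is
         read only on the AuthenticAMD path.  */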

      if (HAS_ARCH_FEATURE (AVX_Usable))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
            cpu_features->feature[index_arch_FMA4_Usable]
              |= bit_arch_FMA4_Usable;
        }

      if (family == 0x15)
        {
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
          /* "Excavator"  */
          if (model >= 0x60 && model <= 0x7f)
            cpu_features->feature[index_arch_Fast_Unaligned_Load]
              |= (bit_arch_Fast_Unaligned_Load
                  | bit_arch_Fast_Copy_Backward);
        }
    }
  else
    {
      kind = arch_kind_other;
      get_common_indeces (cpu_features, NULL, NULL, NULL, NULL);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;
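
  /* CX8 (CMPXCHG8B) and CMOV are the instructions that distinguish the
     i586 and i686 baselines, hence the mapping above.  */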

no_cpuid:
  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;

#if HAVE_TUNABLES
  TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
  cpu_features->non_temporal_threshold
    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
  cpu_features->data_cache_size
    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  cpu_features->shared_cache_size
    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
#endif
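
  /* When set, the cache-size and non-temporal-threshold tunables are
     consulted by the x86 cache-information code in preference to the
     values it would otherwise derive from CPUID.  */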

  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  */
  GLRO(dl_platform) = NULL;
#if !HAVE_TUNABLES && defined SHARED
  /* The glibc.tune.hwcap_mask tunable is initialized already, so no need
     to do this.  */
  GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
#endif

#ifdef __x86_64__
  GLRO(dl_hwcap) = HWCAP_X86_64;
  if (cpu_features->kind == arch_kind_intel)
    {
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
          && CPU_FEATURES_CPU_P (cpu_features, AVX512CD))
        {
          if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
            {
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512PF))
                GLRO(dl_platform) = "xeon_phi";
            }
          else
            {
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512BW)
                  && CPU_FEATURES_CPU_P (cpu_features, AVX512DQ)
                  && CPU_FEATURES_CPU_P (cpu_features, AVX512VL))
                GLRO(dl_hwcap) |= HWCAP_X86_AVX512_1;
            }
        }

      if (GLRO(dl_platform) == NULL
          && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
          && CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
          && CPU_FEATURES_CPU_P (cpu_features, BMI1)
          && CPU_FEATURES_CPU_P (cpu_features, BMI2)
          && CPU_FEATURES_CPU_P (cpu_features, LZCNT)
          && CPU_FEATURES_CPU_P (cpu_features, MOVBE)
          && CPU_FEATURES_CPU_P (cpu_features, POPCNT))
        GLRO(dl_platform) = "haswell";
    }
#else
  GLRO(dl_hwcap) = 0;
  if (CPU_FEATURES_CPU_P (cpu_features, SSE2))
    GLRO(dl_hwcap) |= HWCAP_X86_SSE2;

  if (CPU_FEATURES_ARCH_P (cpu_features, I686))
    GLRO(dl_platform) = "i686";
  else if (CPU_FEATURES_ARCH_P (cpu_features, I586))
    GLRO(dl_platform) = "i586";
#endif
}