// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif
/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}
static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
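/*
 * A note on the gprs[] convention the two helpers above rely on: as far
 * as these helpers use it, the rdmsr_safe_regs()/wrmsr_safe_regs() array
 * maps indices 0..7 to EAX, ECX, EDX, EBX, <spare>, EBP, ESI and EDI, so
 * gprs[1] carries the MSR number and gprs[7] the 0x9c5a203a passcode that
 * unlocks the K8's reserved MSR range. Illustrative (hypothetical) usage,
 * mirroring the call in init_amd_k8() below:
 *
 *	u64 value;
 *
 *	if (!rdmsrl_amd_safe(0xc001100d, &value))
 *		pr_debug("MSR C001_100D = %llx\n", value);
 */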
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266  Issue Date: August 1998)
 *
 * The following test is interesting: AMD neglected to bump the chip
 * stepping when fixing the bug, but they also tweaked some performance
 * at the same time.
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif
static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}
static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}
	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write-allocate on the low 508MB */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips... */
		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}
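/*
 * Worked example for the WHCR arithmetic above (illustrative numbers,
 * not from an AMD document): on a K6 model 9 with mbytes = 256, the
 * "new style" value is ((256 >> 2) << 22) | (1 << 16) = 0x10010000,
 * i.e. the write-allocate limit in 4 MB units lands in bits 31:22 and
 * bit 16 is the enable bit. The "old style" layout instead encodes
 * (1 << 0) | ((256 / 4) << 1) = 0x81, the limit in bits 7:1 plus the
 * enable bit 0.
 */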
static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR_K7_HWCR needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}
	/* Is this call coming from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running an SMP kernel on a single non-MP
	 * approved Athlon.
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
/*
 * Fix up topo::core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->topo.core_id %= cus_per_node;
}
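/*
 * Illustrative example (hypothetical part): with nodes_per_socket = 2
 * and c->x86_max_cores = 8, cus_per_node is 4, so a pre-F17h CPU that
 * enumerated core_id 5 is folded back into the per-node range as
 * 5 % 4 = 1.
 */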
/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		c->topo.die_id = ecx & 0xff;

		if (c->x86 == 0x15)
			c->topo.cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->topo.core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->topo.die_id = value & 7;
		c->topo.llc_id = c->topo.die_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}
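/*
 * Illustrative decode of the CPUID 0x8000001e leaf consumed above
 * (hypothetical register values): EBX = 0x00000102 and ECX = 0x00000300
 * on a Fam17h part would mean core_id = EBX[7:0] = 2, two threads per
 * core (EBX[15:8] + 1), die_id = ECX[7:0] = 0, and, as read in
 * bsp_init_amd(), ECX[10:8] + 1 = 4 nodes per socket.
 */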
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->topo.core_id = c->topo.initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->topo.pkg_id = c->topo.initial_apicid >> bits;
	/* use socket ID also for last level cache */
	c->topo.llc_id = c->topo.die_id = c->topo.pkg_id;
}
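/*
 * Illustrative example (hypothetical values): with x86_coreid_bits = 2
 * and an initial APIC ID of 13 (0b1101), the decomposition above yields
 * core_id = 13 & 3 = 1 and pkg_id = 13 >> 2 = 3.
 */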
u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu_llc_id(cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs, e.g. APIC ID.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
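/*
 * Illustrative decode of the CPUID 0x80000008 ECX value parsed above
 * (hypothetical): ECX = 0x00003007 would mean (ECX & 0xff) + 1 = 8 cores
 * and an ApicIdCoreIdSize of (ECX >> 12) & 0xF = 3, i.e. three core-id
 * bits, which is consistent with the recompute path (1 << 3 >= 8).
 */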
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}
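	/*
	 * Worked example for the bit-slice computation above (hypothetical
	 * L1I geometry): cpuid_edx(0x80000005) reporting a 64 KB, 2-way
	 * instruction cache gives assoc = 2 and
	 * upperbit = (64 << 10) / 2 = 0x8000, so va_align.mask becomes
	 * (0x8000 - 1) & PAGE_MASK = 0x7000 and bits [12:15) of mmap
	 * addresses get randomized.
	 */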
	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}

		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
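	/*
	 * The base value cached above is what the SSBD toggle path ORs with
	 * x86_amd_ls_cfg_ssbd_mask, so the context-switch hot path can write
	 * either base or base | mask without a read-modify-write of
	 * MSR_AMD64_LS_CFG. As an illustration, on family 0x15 the mask
	 * computed here is 1ULL << 54 = 0x0040000000000000.
	 */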
	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;

	case 0x19:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;

	case 0x1a:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x2f:
		case 0x40 ... 0x4f:
		case 0x70 ... 0x77:
			setup_force_cpu_cap(X86_FEATURE_ZEN5);
			break;
		default:
			goto warn;
		}
		break;

	default:
		break;
	}

	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}
static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *	      SEV and SEV_ES feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
	}
}
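/*
 * Illustrative decode of the CPUID 0x8000001f EBX value used above
 * (hypothetical): EBX[5:0] is the C-bit position and EBX[11:6] the
 * physical address space reduction, so EBX = 0x016f on a 48-bit part
 * would mean C-bit 47 and x86_phys_bits reduced by 5 to 43.
 */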
static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}
static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif

	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}
static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h, the BIOS may not have properly enabled WC+ support,
	 * causing it to be converted to CD memtype. This may result in
	 * performance degradation for certain nested-paging guests. Prevent
	 * this conversion by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}
static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}
static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);
static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}
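/*
 * Why bit 62 above: MSR_AMD64_CPUID_FN_1 shadows the CPUID Fn0000_0001
 * EDX:ECX pair as a single 64-bit value, so bit 62 of the MSR is bit 30
 * of ECX, the RDRAND feature bit that the cpuid_ecx(1) & BIT(30) checks
 * test on either side of the write.
 */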
static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}
static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}
static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386. The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}
void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}
static void init_amd_zen_common(void)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}
static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);
}
static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
	case 0x60 ... 0x67: good_rev = 0x0860010b; break;
	case 0x68 ... 0x6f: good_rev = 0x08608105; break;
	case 0x70 ... 0x7f: good_rev = 0x08701032; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;

	default: return false;
	}

	if (boot_cpu_data.microcode < good_rev)
		return false;

	return true;
}
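/*
 * Illustrative example (hypothetical revision): a model 0x31 part
 * running microcode 0x08301038 compares below good_rev 0x0830107a, so
 * this returns false and zen2_zenbleed_check() below falls back to
 * setting the chicken bit instead of relying on the microcode fix.
 */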
static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}
static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}
static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}
static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}
static void init_amd_zen5(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();
}
static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* get apicid instead of initial apic id from cpuid */
	c->topo.apicid = read_apic_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:    init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	}

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN5))
		init_amd_zen5(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature.
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}
#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
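/*
 * Illustrative decode of the CPUID 0x80000006 EBX value parsed above
 * (hypothetical): EBX = 0x64006400 would give 4K TLB counts of
 * (EBX >> 16) & 0xfff = 0x400 = 1024 L2 DTLB entries and
 * EBX & 0xfff = 1024 L2 ITLB entries.
 */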
static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};
void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}
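/*
 * Illustrative (hypothetical) use of the helper above: pairing a data
 * breakpoint on DR0 with amd_set_dr_addr_mask(0xfff, 0) masks address
 * bits 11:0 out of the comparison, so the breakpoint fires for any
 * access within the surrounding 4 KB page rather than a single address.
 */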
unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
 */
void noinstr amd_clear_divider(void)
{
	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
		     :: "a" (0), "d" (0), "r" (1));
}
EXPORT_SYMBOL_GPL(amd_clear_divider);