From: Linus Torvalds
Date: Tue, 2 Dec 2025 19:35:49 +0000 (-0800)
Subject: Merge tag 'x86_microcode_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel...
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=2a47c26e55a2bc085a2349ed1d4e065ee298155f;p=thirdparty%2Flinux.git

Merge tag 'x86_microcode_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 microcode loading updates from Borislav Petkov:

 - Add microcode staging support on Intel: it moves the loading of the
   microcode blobs themselves to a non-critical path so that microcode
   loading latencies are kept to a minimum. Directing the hardware to
   actually load the microcode is the only step left on the critical
   path. The scheme is also opportunistic: on failure, the machinery
   falls back to normal loading

 - Add the capability to the AMD side of the loader to select one of
   two per-family/model/stepping patches: one pre-Entrysign and one
   post-Entrysign. The goal is to take care of machines which haven't
   updated their BIOS yet - something they should absolutely do, as
   that is the only proper Entrysign fix

 - Other small cleanups and fixlets

* tag 'x86_microcode_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode: Mark early_parse_cmdline() as __init
  x86/microcode/AMD: Select which microcode patch to load
  x86/microcode/intel: Enable staging when available
  x86/microcode/intel: Support mailbox transfer
  x86/microcode/intel: Implement staging handler
  x86/microcode/intel: Define staging state struct
  x86/microcode/intel: Establish staging control logic
  x86/microcode: Introduce staging step to reduce late-loading time
  x86/cpu/topology: Make primary thread mask available with SMP=n
---
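To make the AMD patch-selection scheme above concrete, here is a minimal,
self-contained sketch of how a per-family/model/stepping cutoff table like
get_cutoff_revision() in the diff below can be consumed. The helper name
use_pre_entrysign_patch(), the sample revision, and the <= comparison are
assumptions made for this illustration, not the loader's actual code.

  /* Sketch only: one way to turn a cutoff revision into a patch choice. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  typedef uint32_t u32;

  /* Same shape as the kernel helper: returns 0 when no cutoff is known. */
  static u32 get_cutoff_revision(u32 rev)
  {
  	switch (rev >> 8) {
  	case 0xa7041: return 0xa704109;	/* one f/m/s range as an example */
  	default:      return 0;
  	}
  }

  /* Hypothetical helper: pick the pre-Entrysign patch for machines whose
   * current revision is at or below the cutoff for their f/m/s range. */
  static bool use_pre_entrysign_patch(u32 cur_rev)
  {
  	u32 cutoff = get_cutoff_revision(cur_rev);

  	if (!cutoff)	/* unknown range: nothing to select */
  		return false;

  	return cur_rev <= cutoff;
  }

  int main(void)
  {
  	u32 rev = 0xa704104;	/* assumed current revision, for the demo */

  	printf("rev 0x%x -> %s patch\n", rev,
  	       use_pre_entrysign_patch(rev) ? "pre-Entrysign" : "post-Entrysign");
  	return 0;
  }

Returning 0 for an unknown range keeps the sketch conservative: when no
cutoff is known, it does not pretend the machine wants the pre-Entrysign
patch.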

2a47c26e55a2bc085a2349ed1d4e065ee298155f
diff --cc arch/x86/kernel/cpu/microcode/amd.c
index a881bf4c20114,8d3d1114881bf..3821a985f4ffe
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@@ -186,8 -186,50 +186,53 @@@ static u32 cpuid_to_ucode_rev(unsigned
  	return p.ucode_rev;
  }
  
+ static u32 get_cutoff_revision(u32 rev)
+ {
+ 	switch (rev >> 8) {
+ 	case 0x80012: return 0x8001277; break;
+ 	case 0x80082: return 0x800820f; break;
+ 	case 0x83010: return 0x830107c; break;
+ 	case 0x86001: return 0x860010e; break;
+ 	case 0x86081: return 0x8608108; break;
+ 	case 0x87010: return 0x8701034; break;
+ 	case 0x8a000: return 0x8a0000a; break;
+ 	case 0xa0010: return 0xa00107a; break;
+ 	case 0xa0011: return 0xa0011da; break;
+ 	case 0xa0012: return 0xa001243; break;
+ 	case 0xa0082: return 0xa00820e; break;
+ 	case 0xa1011: return 0xa101153; break;
+ 	case 0xa1012: return 0xa10124e; break;
+ 	case 0xa1081: return 0xa108109; break;
+ 	case 0xa2010: return 0xa20102f; break;
+ 	case 0xa2012: return 0xa201212; break;
+ 	case 0xa4041: return 0xa404109; break;
+ 	case 0xa5000: return 0xa500013; break;
+ 	case 0xa6012: return 0xa60120a; break;
+ 	case 0xa7041: return 0xa704109; break;
+ 	case 0xa7052: return 0xa705208; break;
+ 	case 0xa7080: return 0xa708009; break;
+ 	case 0xa70c0: return 0xa70C009; break;
+ 	case 0xaa001: return 0xaa00116; break;
+ 	case 0xaa002: return 0xaa00218; break;
+ 	case 0xb0021: return 0xb002146; break;
++	case 0xb0081: return 0xb008111; break;
+ 	case 0xb1010: return 0xb101046; break;
+ 	case 0xb2040: return 0xb204031; break;
+ 	case 0xb4040: return 0xb404031; break;
++	case 0xb4041: return 0xb404101; break;
+ 	case 0xb6000: return 0xb600031; break;
++	case 0xb6080: return 0xb608031; break;
+ 	case 0xb7000: return 0xb700031; break;
+ 	default: break;
+ 	}
+ 
+ 	return 0;
+ }
+ 
  static bool need_sha_check(u32 cur_rev)
  {
+ 	u32 cutoff;
+ 
  	if (!cur_rev) {
  		cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
  		pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);