/* arch/x86/kernel/cpu/microcode/amd.c */
1 /*
2 * AMD CPU Microcode Update Driver for Linux
3 *
4 * This driver allows to upgrade microcode on F10h AMD
5 * CPUs and later.
6 *
7 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
8 * 2013-2016 Borislav Petkov <bp@alien8.de>
9 *
10 * Author: Peter Oruba <peter.oruba@amd.com>
11 *
12 * Based on work by:
13 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
14 *
15 * early loader:
16 * Copyright (C) 2013 Advanced Micro Devices, Inc.
17 *
18 * Author: Jacob Shin <jacob.shin@amd.com>
19 * Fixes: Borislav Petkov <bp@suse.de>
20 *
21 * Licensed under the terms of the GNU General Public
22 * License version 2. See file COPYING for details.
23 */
24 #define pr_fmt(fmt) "microcode: " fmt
25
26 #include <linux/earlycpio.h>
27 #include <linux/firmware.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/initrd.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
33
34 #include <asm/microcode_amd.h>
35 #include <asm/microcode.h>
36 #include <asm/processor.h>
37 #include <asm/setup.h>
38 #include <asm/cpu.h>
39 #include <asm/msr.h>
40
/*
 * Equivalence table installed by install_equiv_cpu_table(); maps a CPU's
 * CPUID(1).EAX signature to the equivalence ID used in patch headers.
 */
static struct equiv_cpu_entry *equiv_cpu_table;

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
static struct cont_desc {
	struct microcode_amd *mc;	/* matching patch within @data, or NULL */
	u32 cpuid_1_eax;		/* CPUID(1).EAX of the CPU to match for */
	u32 psize;			/* size of the matching patch, bytes */
	u16 eq_id;			/* equivalence ID of the matching container */
	u8 *data;			/* start of the matching container */
	size_t size;			/* container size; -1 marks "no ucode found" */
} cont;

/* Revision applied during early boot; reported once at driver init. */
static u32 ucode_new_rev;
/* Copy of the matching patch, kept for AP/resume (re-)application. */
static u8 amd_ucode_patch[PATCH_MAX_SIZE];

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/early-microcode.txt
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
66
67 static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
68 {
69 for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
70 if (sig == equiv_table->installed_cpu)
71 return equiv_table->equiv_cpu;
72 }
73
74 return 0;
75 }
76
77 /*
78 * This scans the ucode blob for the proper container as we can have multiple
79 * containers glued together. Returns the equivalence ID from the equivalence
80 * table or 0 if none found.
81 * Returns the amount of bytes consumed while scanning. @desc contains all the
82 * data we're going to use in later stages of the application.
83 */
84 static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
85 {
86 struct equiv_cpu_entry *eq;
87 ssize_t orig_size = size;
88 u32 *hdr = (u32 *)ucode;
89 u16 eq_id;
90 u8 *buf;
91
92 /* Am I looking at an equivalence table header? */
93 if (hdr[0] != UCODE_MAGIC ||
94 hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
95 hdr[2] == 0) {
96 desc->eq_id = 0;
97 return CONTAINER_HDR_SZ;
98 }
99
100 buf = ucode;
101
102 eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
103
104 /* Find the equivalence ID of our CPU in this table: */
105 eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
106
107 buf += hdr[2] + CONTAINER_HDR_SZ;
108 size -= hdr[2] + CONTAINER_HDR_SZ;
109
110 /*
111 * Scan through the rest of the container to find where it ends. We do
112 * some basic sanity-checking too.
113 */
114 while (size > 0) {
115 struct microcode_amd *mc;
116 u32 patch_size;
117
118 hdr = (u32 *)buf;
119
120 if (hdr[0] != UCODE_UCODE_TYPE)
121 break;
122
123 /* Sanity-check patch size. */
124 patch_size = hdr[1];
125 if (patch_size > PATCH_MAX_SIZE)
126 break;
127
128 /* Skip patch section header: */
129 buf += SECTION_HDR_SIZE;
130 size -= SECTION_HDR_SIZE;
131
132 mc = (struct microcode_amd *)buf;
133 if (eq_id == mc->hdr.processor_rev_id) {
134 desc->psize = patch_size;
135 desc->mc = mc;
136 }
137
138 buf += patch_size;
139 size -= patch_size;
140 }
141
142 /*
143 * If we have found a patch (desc->mc), it means we're looking at the
144 * container which has a patch for this CPU so return 0 to mean, @ucode
145 * already points to the proper container. Otherwise, we return the size
146 * we scanned so that we can advance to the next container in the
147 * buffer.
148 */
149 if (desc->mc) {
150 desc->eq_id = eq_id;
151 desc->data = ucode;
152 desc->size = orig_size - size;
153
154 return 0;
155 }
156
157 return orig_size - size;
158 }
159
160 /*
161 * Scan the ucode blob for the proper container as we can have multiple
162 * containers glued together.
163 */
164 static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
165 {
166 ssize_t rem = size;
167
168 while (rem >= 0) {
169 ssize_t s = parse_container(ucode, rem, desc);
170 if (!s)
171 return;
172
173 ucode += s;
174 rem -= s;
175 }
176 }
177
/*
 * Hand @mc to the CPU's patch loader MSR and verify the update took.
 *
 * The write must happen before the PATCH_LEVEL read below — the level
 * is re-read precisely to confirm the load.
 *
 * Returns 0 on success, -1 when the CPU still reports a different
 * patch level afterwards.
 */
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	/* Point the loader MSR at the patch body. */
	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}
191
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * @save_patch: copy the applied patch into amd_ucode_patch[] so APs/resume
 *		can re-apply it later.
 * @ret_desc:   optional; receives the container descriptor when a matching,
 *		newer patch was found (regardless of apply success).
 *
 * Returns true only when a newer matching patch was found AND applied
 * successfully; false otherwise.
 */
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
			  bool save_patch, struct cont_desc *ret_desc)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, dummy, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	/*
	 * 32-bit early boot: globals must be reached through their physical
	 * addresses (see the __pa_nodebug() use throughout this file).
	 */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch = &amd_ucode_patch;
#endif

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);
	if (!desc.eq_id)
		return ret;

	mc = desc.mc;
	if (!mc)
		return ret;

	/* Only apply when newer than what the CPU currently runs. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev >= mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret = true;

		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	/* Hand the container back even when __apply_microcode_amd() failed. */
	if (ret_desc)
		*ret_desc = desc;

	return ret;
}
248
249 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
250 {
251 #ifdef CONFIG_X86_64
252 char fw_name[36] = "amd-ucode/microcode_amd.bin";
253
254 if (family >= 0x15)
255 snprintf(fw_name, sizeof(fw_name),
256 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
257
258 return get_builtin_firmware(cp, fw_name);
259 #else
260 return false;
261 #endif
262 }
263
/*
 * Locate the microcode container to use this boot: builtin firmware
 * first, then the cpio blob prepended to the initrd. The result
 * (possibly empty) is returned through @ret. Also records the CPU
 * signature in ucode_cpu_info for later use by load_microcode_amd().
 */
void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* 32-bit early boot: access globals via physical addresses. */
		uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		uci = ucode_cpu_info;
		path = ucode_path;
		use_pa = false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);

	/* Needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_1_eax;

	*ret = cp;
}
289
290 void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
291 {
292 struct cpio_data cp = { };
293
294 __load_ucode_amd(cpuid_1_eax, &cp);
295
296 if (!(cp.data && cp.size))
297 return;
298
299 apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
300 }
301
/*
 * AP early-load entry point: reuse the container the first AP cached in
 * @cont, falling back to a fresh scan of the blob for the first AP or
 * for mixed-stepping systems where this AP needs a different patch.
 */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	struct cont_desc *desc;
	u16 eq_id;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* 32-bit early boot: access globals via physical addresses. */
		mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
		desc = (struct cont_desc *)__pa_nodebug(&cont);
	} else {
		mc = (struct microcode_amd *)amd_ucode_patch;
		desc = &cont;
	}

	/* First AP hasn't cached it yet, go through the blob. */
	if (!desc->data) {
		struct cpio_data cp = { };

		/* A previous scan already failed; don't rescan on every AP. */
		if (desc->size == -1)
			return;

	/*
	 * NOTE(review): the goto at the bottom jumps into this block past
	 * @cp's initializer, so @cp is indeterminate until
	 * __load_ucode_amd() assigns it — confirm this is intended.
	 */
	reget:
		__load_ucode_amd(cpuid_1_eax, &cp);
		if (!(cp.data && cp.size)) {
			/*
			 * Mark it so that other APs do not scan again for no
			 * real reason and slow down boot needlessly.
			 */
			desc->size = -1;
			return;
		}

		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, desc)) {
			/* Nothing usable found; remember that, too. */
			desc->data = NULL;
			desc->size = -1;
			return;
		}
	}

	eq = (struct equiv_cpu_entry *)(desc->data + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, cpuid_1_eax);
	if (!eq_id)
		return;

	if (eq_id == desc->eq_id) {
		u32 rev, dummy;

		native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

		mc = (struct microcode_amd *)amd_ucode_patch;

		/* Apply the cached patch only if it is newer. */
		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		goto reget;
	}
}
369
370 static enum ucode_state
371 load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
372
373 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
374 {
375 struct cont_desc desc = { 0 };
376 enum ucode_state ret;
377 struct cpio_data cp;
378
379 cp = find_microcode_in_initrd(ucode_path, false);
380 if (!(cp.data && cp.size))
381 return -EINVAL;
382
383 desc.cpuid_1_eax = cpuid_1_eax;
384
385 scan_containers(cp.data, cp.size, &desc);
386 if (!desc.eq_id)
387 return -EINVAL;
388
389 ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
390 desc.data, desc.size);
391 if (ret != UCODE_OK)
392 return -EINVAL;
393
394 return 0;
395 }
396
397 void reload_ucode_amd(void)
398 {
399 struct microcode_amd *mc;
400 u32 rev, dummy;
401
402 mc = (struct microcode_amd *)amd_ucode_patch;
403 if (!mc)
404 return;
405
406 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
407
408 if (rev < mc->hdr.patch_id) {
409 if (!__apply_microcode_amd(mc)) {
410 ucode_new_rev = mc->hdr.patch_id;
411 pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
412 }
413 }
414 }
415 static u16 __find_equiv_id(unsigned int cpu)
416 {
417 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
418 return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
419 }
420
421 static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
422 {
423 int i = 0;
424
425 BUG_ON(!equiv_cpu_table);
426
427 while (equiv_cpu_table[i].equiv_cpu != 0) {
428 if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
429 return equiv_cpu_table[i].installed_cpu;
430 i++;
431 }
432 return 0;
433 }
434
435 /*
436 * a small, trivial cache of per-family ucode patches
437 */
438 static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
439 {
440 struct ucode_patch *p;
441
442 list_for_each_entry(p, &microcode_cache, plist)
443 if (p->equiv_cpu == equiv_cpu)
444 return p;
445 return NULL;
446 }
447
448 static void update_cache(struct ucode_patch *new_patch)
449 {
450 struct ucode_patch *p;
451
452 list_for_each_entry(p, &microcode_cache, plist) {
453 if (p->equiv_cpu == new_patch->equiv_cpu) {
454 if (p->patch_id >= new_patch->patch_id)
455 /* we already have the latest patch */
456 return;
457
458 list_replace(&p->plist, &new_patch->plist);
459 kfree(p->data);
460 kfree(p);
461 return;
462 }
463 }
464 /* no patch found, add it */
465 list_add_tail(&new_patch->plist, &microcode_cache);
466 }
467
468 static void free_cache(void)
469 {
470 struct ucode_patch *p, *tmp;
471
472 list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
473 __list_del(p->plist.prev, p->plist.next);
474 kfree(p->data);
475 kfree(p);
476 }
477 }
478
479 static struct ucode_patch *find_patch(unsigned int cpu)
480 {
481 u16 equiv_id;
482
483 equiv_id = __find_equiv_id(cpu);
484 if (!equiv_id)
485 return NULL;
486
487 return cache_find_patch(equiv_id);
488 }
489
490 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
491 {
492 struct cpuinfo_x86 *c = &cpu_data(cpu);
493 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
494 struct ucode_patch *p;
495
496 csig->sig = cpuid_eax(0x00000001);
497 csig->rev = c->microcode;
498
499 /*
500 * a patch could have been loaded early, set uci->mc so that
501 * mc_bp_resume() can call apply_microcode()
502 */
503 p = find_patch(cpu);
504 if (p && (p->patch_id == csig->rev))
505 uci->mc = p->data;
506
507 pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
508
509 return 0;
510 }
511
512 static unsigned int verify_patch_size(u8 family, u32 patch_size,
513 unsigned int size)
514 {
515 u32 max_size;
516
517 #define F1XH_MPB_MAX_SIZE 2048
518 #define F14H_MPB_MAX_SIZE 1824
519 #define F15H_MPB_MAX_SIZE 4096
520 #define F16H_MPB_MAX_SIZE 3458
521
522 switch (family) {
523 case 0x14:
524 max_size = F14H_MPB_MAX_SIZE;
525 break;
526 case 0x15:
527 max_size = F15H_MPB_MAX_SIZE;
528 break;
529 case 0x16:
530 max_size = F16H_MPB_MAX_SIZE;
531 break;
532 default:
533 max_size = F1XH_MPB_MAX_SIZE;
534 break;
535 }
536
537 if (patch_size > min_t(u32, size, max_size)) {
538 pr_err("patch size mismatch\n");
539 return 0;
540 }
541
542 return patch_size;
543 }
544
/*
 * Apply the cached patch to @cpu — which must be the CPU we are
 * currently running on — and update the per-CPU bookkeeping.
 *
 * Returns 0 when no patch was needed or it applied cleanly, -1 when
 * the hardware rejected the patch.
 */
static int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev, dummy;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		/* CPU already runs this or newer; just sync the bookkeeping. */
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}
586
587 static int install_equiv_cpu_table(const u8 *buf)
588 {
589 unsigned int *ibuf = (unsigned int *)buf;
590 unsigned int type = ibuf[1];
591 unsigned int size = ibuf[2];
592
593 if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
594 pr_err("empty section/"
595 "invalid type field in container file section header\n");
596 return -EINVAL;
597 }
598
599 equiv_cpu_table = vmalloc(size);
600 if (!equiv_cpu_table) {
601 pr_err("failed to allocate equivalent CPU table\n");
602 return -ENOMEM;
603 }
604
605 memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
606
607 /* add header length */
608 return size + CONTAINER_HDR_SZ;
609 }
610
/* Release the vmalloc'd equivalence table and clear the pointer. */
static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
616
/* Tear down all driver state: equivalence table and patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
622
/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	/* Section layout: u32 type at fw[0], u32 patch size at fw[4]. */
	patch_size = *(u32 *)(fw + 4);
	crnt_size = patch_size + SECTION_HDR_SIZE;
	mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	/* Chipset-specific (northbridge/southbridge) patches not handled. */
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	/* Own copy of the patch body; the firmware blob may be released. */
	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}
691
692 static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
693 size_t size)
694 {
695 enum ucode_state ret = UCODE_ERROR;
696 unsigned int leftover;
697 u8 *fw = (u8 *)data;
698 int crnt_size = 0;
699 int offset;
700
701 offset = install_equiv_cpu_table(data);
702 if (offset < 0) {
703 pr_err("failed to create equivalent cpu table\n");
704 return ret;
705 }
706 fw += offset;
707 leftover = size - offset;
708
709 if (*(u32 *)fw != UCODE_UCODE_TYPE) {
710 pr_err("invalid type field in container file section header\n");
711 free_equiv_cpu_table();
712 return ret;
713 }
714
715 while (leftover) {
716 crnt_size = verify_and_add_patch(family, fw, leftover);
717 if (crnt_size < 0)
718 return ret;
719
720 fw += crnt_size;
721 leftover -= crnt_size;
722 }
723
724 return UCODE_OK;
725 }
726
/*
 * (Re)build the patch cache from @data and, on 32-bit, stash the BSP's
 * matching patch into amd_ucode_patch[] so the early loader can apply
 * it on APs/resume without needing vmalloc().
 */
static enum ucode_state
load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			/* ksize() may exceed the request; cap at both bounds. */
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}
753
754 /*
755 * AMD microcode firmware naming convention, up to family 15h they are in
756 * the legacy file:
757 *
758 * amd-ucode/microcode_amd.bin
759 *
760 * This legacy file is always smaller than 2K in size.
761 *
762 * Beginning with family 15h, they are in family-specific firmware files:
763 *
764 * amd-ucode/microcode_amd_fam15h.bin
765 * amd-ucode/microcode_amd_fam16h.bin
766 * ...
767 *
768 * These might be larger than 2K.
769 */
770 static enum ucode_state request_microcode_amd(int cpu, struct device *device,
771 bool refresh_fw)
772 {
773 char fw_name[36] = "amd-ucode/microcode_amd.bin";
774 struct cpuinfo_x86 *c = &cpu_data(cpu);
775 enum ucode_state ret = UCODE_NFOUND;
776 const struct firmware *fw;
777
778 /* reload ucode container only on the boot cpu */
779 if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
780 return UCODE_OK;
781
782 if (c->x86 >= 0x15)
783 snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
784
785 if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
786 pr_debug("failed to load file %s\n", fw_name);
787 goto out;
788 }
789
790 ret = UCODE_ERROR;
791 if (*(u32 *)fw->data != UCODE_MAGIC) {
792 pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
793 goto fw_release;
794 }
795
796 ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
797
798 fw_release:
799 release_firmware(fw);
800
801 out:
802 return ret;
803 }
804
/* Loading microcode from a user-supplied buffer is not supported on AMD. */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}
810
811 static void microcode_fini_cpu_amd(int cpu)
812 {
813 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
814
815 uci->mc = NULL;
816 }
817
/* Hooks handed to the generic microcode core driver. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user = request_microcode_user,
	.request_microcode_fw = request_microcode_amd,
	.collect_cpu_info = collect_cpu_info_amd,
	.apply_microcode = apply_microcode_amd,
	.microcode_fini_cpu = microcode_fini_cpu_amd,
};
825
826 struct microcode_ops * __init init_amd_microcode(void)
827 {
828 struct cpuinfo_x86 *c = &boot_cpu_data;
829
830 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
831 pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
832 return NULL;
833 }
834
835 if (ucode_new_rev)
836 pr_info_once("microcode updated early to new patch_level=0x%08x\n",
837 ucode_new_rev);
838
839 return &microcode_amd_ops;
840 }
841
/* Module teardown: drop the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}