/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows to upgrade microcode on F10h AMD
 * CPUs and later.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	     2013-2018 Borislav Petkov <bp@alien8.de>
 *
 * Author: Peter Oruba <peter.oruba@amd.com>
 *
 * Based on work by:
 * Tigran Aivazian <aivazian.tigran@gmail.com>
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 *
 * Licensed under the terms of the GNU General Public
 * License version 2. See file COPYING for details.
 */
24 #define pr_fmt(fmt) "microcode: " fmt
26 #include <linux/earlycpio.h>
27 #include <linux/firmware.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/initrd.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
34 #include <asm/microcode_amd.h>
35 #include <asm/microcode.h>
36 #include <asm/processor.h>
37 #include <asm/setup.h>
/*
 * In-kernel CPU equivalence table: maps a CPU signature to the equivalence
 * ID used to select a matching microcode patch.
 * NOTE(review): this extraction is garbled — original line numbers are
 * embedded as text, statements are wrapped mid-token, and lines are missing
 * (e.g. the closing brace/variable name of this struct and most of the
 * container-descriptor struct below). Code kept byte-identical; restore
 * from upstream before compiling.
 */
41 static struct equiv_cpu_table
{
42 unsigned int num_entries
;
43 struct equiv_cpu_entry
*entry
;
/* Fragment of the container descriptor; @mc is the matched patch. */
47 * This points to the current valid container of microcode patches which we will
48 * save from the initrd/builtin before jettisoning its contents. @mc is the
49 * microcode patch we found to match.
52 struct microcode_amd
*mc
;
/*
 * ucode_new_rev: revision of the most recently applied patch (0 if none).
 * amd_ucode_patch: static buffer holding the BSP's matching patch so it can
 * be re-applied on APs / resume before vmalloc() is usable.
 * ucode_path: cpio path of the AMD container prepended to the initrd.
 * NOTE(review): garbled extraction — embedded line numbers, wrapped tokens.
 */
59 static u32 ucode_new_rev
;
60 static u8 amd_ucode_patch
[PATCH_MAX_SIZE
];
63 * Microcode patch container file is prepended to the initrd in cpio
64 * format. See Documentation/x86/microcode.txt
67 ucode_path
[] __maybe_unused
= "kernel/x86/microcode/AuthenticAMD.bin";
/*
 * find_equiv_id() - linear-scan @et for an entry whose installed_cpu matches
 * @sig; returns 0 for a NULL/empty table.
 * NOTE(review): garbled extraction — the return statements and closing
 * braces are missing from this view; kept byte-identical.
 */
69 static u16
find_equiv_id(struct equiv_cpu_table
*et
, u32 sig
)
73 if (!et
|| !et
->num_entries
)
76 for (i
= 0; i
< et
->num_entries
; i
++) {
77 struct equiv_cpu_entry
*e
= &et
->entry
[i
];
79 if (sig
== e
->installed_cpu
)
/*
 * verify_container() - sanity-check a microcode container: size must exceed
 * the header and the leading u32 must be UCODE_MAGIC. @early selects the
 * pre-paging debug-print behavior (presumably — the branches using @early
 * are missing from this view; confirm against upstream).
 * NOTE(review): garbled extraction; kept byte-identical.
 */
88 * Check whether there is a valid microcode container file at the beginning
89 * of @buf of size @buf_size. Set @early to use this function in the early path.
91 static bool verify_container(const u8
*buf
, size_t buf_size
, bool early
)
95 if (buf_size
<= CONTAINER_HDR_SZ
) {
97 pr_debug("Truncated microcode container header.\n");
102 cont_magic
= *(const u32
*)buf
;
103 if (cont_magic
!= UCODE_MAGIC
) {
105 pr_debug("Invalid magic value (0x%08x).\n", cont_magic
);
/*
 * verify_equivalence_table() - validate the equivalence table at the start
 * of @buf: container must verify, section type must be
 * UCODE_EQUIV_CPU_TABLE_TYPE, and the indicated table length (hdr[2]) must
 * hold at least one entry and fit in the remaining buffer.
 * NOTE(review): garbled extraction — return statements and the assignment
 * of cont_type are missing from this view; kept byte-identical.
 */
114 * Check whether there is a valid, non-truncated CPU equivalence table at the
115 * beginning of @buf of size @buf_size. Set @early to use this function in the
118 static bool verify_equivalence_table(const u8
*buf
, size_t buf_size
, bool early
)
120 const u32
*hdr
= (const u32
*)buf
;
121 u32 cont_type
, equiv_tbl_len
;
123 if (!verify_container(buf
, buf_size
, early
))
127 if (cont_type
!= UCODE_EQUIV_CPU_TABLE_TYPE
) {
129 pr_debug("Wrong microcode container equivalence table type: %u.\n",
135 buf_size
-= CONTAINER_HDR_SZ
;
137 equiv_tbl_len
= hdr
[2];
138 if (equiv_tbl_len
< sizeof(struct equiv_cpu_entry
) ||
139 buf_size
< equiv_tbl_len
) {
141 pr_debug("Truncated equivalence table.\n");
/*
 * __verify_patch_section() - validate a patch section header at the start
 * of @buf: buffer must hold a section header, the type must be
 * UCODE_UCODE_TYPE, and the claimed patch size must be at least a
 * microcode_header_amd. On success *sh_psize receives the header's size.
 * NOTE(review): garbled extraction — the reads of p_type/p_size and the
 * return paths are missing from this view; kept byte-identical.
 */
150 * Check whether there is a valid, non-truncated microcode patch section at the
151 * beginning of @buf of size @buf_size. Set @early to use this function in the
154 * On success, @sh_psize returns the patch size according to the section header,
158 __verify_patch_section(const u8
*buf
, size_t buf_size
, u32
*sh_psize
, bool early
)
163 if (buf_size
< SECTION_HDR_SIZE
) {
165 pr_debug("Truncated patch section.\n");
170 hdr
= (const u32
*)buf
;
174 if (p_type
!= UCODE_UCODE_TYPE
) {
176 pr_debug("Invalid type field (0x%x) in container file section header.\n",
182 if (p_size
< sizeof(struct microcode_header_amd
)) {
184 pr_debug("Patch of size %u too short.\n", p_size
);
/*
 * __verify_patch_size() - clamp/validate @sh_psize against the remaining
 * file size and the per-family maximum patch size (F14h has a smaller
 * maximum than the generic F1Xh one; unknown families trip a WARN).
 * NOTE(review): garbled extraction — the family switch scaffolding and the
 * final return are missing from this view; kept byte-identical.
 */
195 * Check whether the passed remaining file @buf_size is large enough to contain
196 * a patch of the indicated @sh_psize (and also whether this size does not
197 * exceed the per-family maximum). @sh_psize is the size read from the section
200 static unsigned int __verify_patch_size(u8 family
, u32 sh_psize
, size_t buf_size
)
205 return min_t(u32
, sh_psize
, buf_size
);
207 #define F1XH_MPB_MAX_SIZE 2048
208 #define F14H_MPB_MAX_SIZE 1824
212 max_size
= F1XH_MPB_MAX_SIZE
;
215 max_size
= F14H_MPB_MAX_SIZE
;
218 WARN(1, "%s: WTF family: 0x%x\n", __func__
, family
);
223 if (sh_psize
> min_t(u32
, buf_size
, max_size
))
/*
 * verify_patch() - full validation of one patch section in @buf:
 * section header, truncation against remaining @buf_size, per-family size,
 * rejection of chipset-specific (nb/sb dev id) patches, and a family match
 * derived from processor_rev_id (0xf + top nibble). Positive return means
 * "not for this family, skip"; *patch_size receives the section's size.
 * NOTE(review): garbled extraction — several return paths and local
 * declarations are missing from this view; kept byte-identical.
 */
230 * Verify the patch in @buf.
234 * positive: patch is not for this family, skip it
238 verify_patch(u8 family
, const u8
*buf
, size_t buf_size
, u32
*patch_size
, bool early
)
240 struct microcode_header_amd
*mc_hdr
;
246 if (!__verify_patch_section(buf
, buf_size
, &sh_psize
, early
))
250 * The section header length is not included in this indicated size
251 * but is present in the leftover file length so we need to subtract
252 * it before passing this value to the function below.
254 buf_size
-= SECTION_HDR_SIZE
;
257 * Check if the remaining buffer is big enough to contain a patch of
258 * size sh_psize, as the section claims.
260 if (buf_size
< sh_psize
) {
262 pr_debug("Patch of size %u truncated.\n", sh_psize
);
267 ret
= __verify_patch_size(family
, sh_psize
, buf_size
);
270 pr_debug("Per-family patch size mismatch.\n");
274 *patch_size
= sh_psize
;
276 mc_hdr
= (struct microcode_header_amd
*)(buf
+ SECTION_HDR_SIZE
);
277 if (mc_hdr
->nb_dev_id
|| mc_hdr
->sb_dev_id
) {
279 pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr
->patch_id
);
283 proc_id
= mc_hdr
->processor_rev_id
;
284 patch_fam
= 0xf + (proc_id
>> 12);
285 if (patch_fam
!= family
)
/*
 * parse_container() - walk one container in @ucode: verify the equivalence
 * table, look up this CPU's equivalence ID, then iterate the patch sections
 * doing sanity checks. When a patch's processor_rev_id matches the CPU's
 * equivalence ID, record it (and its size) in @desc. Returns the number of
 * bytes consumed so the caller can advance to the next glued container.
 * NOTE(review): garbled extraction — the declarations of buf/eq_id/ret, the
 * scan loop header, and several branches are missing from this view; kept
 * byte-identical.
 */
292 * This scans the ucode blob for the proper container as we can have multiple
293 * containers glued together. Returns the equivalence ID from the equivalence
294 * table or 0 if none found.
295 * Returns the amount of bytes consumed while scanning. @desc contains all the
296 * data we're going to use in later stages of the application.
298 static size_t parse_container(u8
*ucode
, size_t size
, struct cont_desc
*desc
)
300 struct equiv_cpu_table table
;
301 size_t orig_size
= size
;
302 u32
*hdr
= (u32
*)ucode
;
306 if (!verify_equivalence_table(ucode
, size
, true))
311 table
.entry
= (struct equiv_cpu_entry
*)(buf
+ CONTAINER_HDR_SZ
);
312 table
.num_entries
= hdr
[2] / sizeof(struct equiv_cpu_entry
);
315 * Find the equivalence ID of our CPU in this table. Even if this table
316 * doesn't contain a patch for the CPU, scan through the whole container
317 * so that it can be skipped in case there are other containers appended.
319 eq_id
= find_equiv_id(&table
, desc
->cpuid_1_eax
);
321 buf
+= hdr
[2] + CONTAINER_HDR_SZ
;
322 size
-= hdr
[2] + CONTAINER_HDR_SZ
;
325 * Scan through the rest of the container to find where it ends. We do
326 * some basic sanity-checking too.
329 struct microcode_amd
*mc
;
333 ret
= verify_patch(x86_family(desc
->cpuid_1_eax
), buf
, size
, &patch_size
, true);
336 * Patch verification failed, skip to the next
337 * container, if there's one:
340 } else if (ret
> 0) {
344 mc
= (struct microcode_amd
*)(buf
+ SECTION_HDR_SIZE
);
345 if (eq_id
== mc
->hdr
.processor_rev_id
) {
346 desc
->psize
= patch_size
;
351 /* Skip patch section header too: */
352 buf
+= patch_size
+ SECTION_HDR_SIZE
;
353 size
-= patch_size
+ SECTION_HDR_SIZE
;
357 * If we have found a patch (desc->mc), it means we're looking at the
358 * container which has a patch for this CPU so return 0 to mean, @ucode
359 * already points to the proper container. Otherwise, we return the size
360 * we scanned so that we can advance to the next container in the
365 desc
->size
= orig_size
- size
;
371 return orig_size
- size
;
/*
 * scan_containers() - repeatedly call parse_container() over glued-together
 * containers until one with a matching patch is found or the blob is
 * exhausted.
 * NOTE(review): garbled extraction — the loop advancing over @s and the
 * wraparound check body are missing from this view; kept byte-identical.
 */
375 * Scan the ucode blob for the proper container as we can have multiple
376 * containers glued together.
378 static void scan_containers(u8
*ucode
, size_t size
, struct cont_desc
*desc
)
381 size_t s
= parse_container(ucode
, size
, desc
);
385 /* catch wraparound */
/*
 * __apply_microcode_amd() - hand the patch body to the CPU via the
 * MSR_AMD64_PATCH_LOADER MSR, then read back MSR_AMD64_PATCH_LEVEL and
 * compare against the patch's ID to confirm the update took effect.
 * NOTE(review): garbled extraction — rev/dummy declarations and the return
 * statements are missing from this view; kept byte-identical.
 */
395 static int __apply_microcode_amd(struct microcode_amd
*mc
)
399 native_wrmsrl(MSR_AMD64_PATCH_LOADER
, (u64
)(long)&mc
->hdr
.data_code
);
401 /* verify patch application was successful */
402 native_rdmsr(MSR_AMD64_PATCH_LEVEL
, rev
, dummy
);
403 if (rev
!= mc
->hdr
.patch_id
)
/*
 * apply_microcode_early_amd() - early (pre-vmalloc) load path: scan @ucode
 * for this CPU's container, apply the found patch if its ID is newer than
 * the current MSR_AMD64_PATCH_LEVEL, record the new revision, and (when
 * @save_patch) stash the patch into amd_ucode_patch. On 32-bit this runs
 * before paging, hence the __pa_nodebug() physical-address accesses.
 * NOTE(review): garbled extraction — the IS_ENABLED(CONFIG_X86_32) branch
 * scaffolding, mc assignment from desc, and return paths are missing from
 * this view; kept byte-identical.
 */
410 * Early load occurs before we can vmalloc(). So we look for the microcode
411 * patch container file in initrd, traverse equivalent cpu table, look for a
412 * matching microcode patch, and update, all in initrd memory in place.
413 * When vmalloc() is available for use later -- on 64-bit during first AP load,
414 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
415 * load_microcode_amd() to save equivalent cpu table and microcode patches in
416 * kernel heap memory.
418 * Returns true if container found (sets @desc), false otherwise.
421 apply_microcode_early_amd(u32 cpuid_1_eax
, void *ucode
, size_t size
, bool save_patch
)
423 struct cont_desc desc
= { 0 };
424 u8 (*patch
)[PATCH_MAX_SIZE
];
425 struct microcode_amd
*mc
;
426 u32 rev
, dummy
, *new_rev
;
430 new_rev
= (u32
*)__pa_nodebug(&ucode_new_rev
);
431 patch
= (u8 (*)[PATCH_MAX_SIZE
])__pa_nodebug(&amd_ucode_patch
);
433 new_rev
= &ucode_new_rev
;
434 patch
= &amd_ucode_patch
;
437 desc
.cpuid_1_eax
= cpuid_1_eax
;
439 scan_containers(ucode
, size
, &desc
);
445 native_rdmsr(MSR_AMD64_PATCH_LEVEL
, rev
, dummy
);
446 if (rev
>= mc
->hdr
.patch_id
)
449 if (!__apply_microcode_amd(mc
)) {
450 *new_rev
= mc
->hdr
.patch_id
;
454 memcpy(patch
, mc
, min_t(u32
, desc
.psize
, PATCH_MAX_SIZE
));
/*
 * get_builtin_microcode() - look up firmware built into the kernel image,
 * using the family-specific name for family >= 0x15 (the condition itself
 * is missing from this view — the snprintf overwriting the legacy name is
 * presumably gated on family; confirm against upstream).
 * NOTE(review): garbled extraction; kept byte-identical.
 */
460 static bool get_builtin_microcode(struct cpio_data
*cp
, unsigned int family
)
463 char fw_name
[36] = "amd-ucode/microcode_amd.bin";
466 snprintf(fw_name
, sizeof(fw_name
),
467 "amd-ucode/microcode_amd_fam%.2xh.bin", family
);
469 return get_builtin_firmware(cp
, fw_name
);
/*
 * __load_ucode_amd() - locate the microcode blob: prefer builtin firmware,
 * otherwise search the initrd cpio. On 32-bit, pointers are converted to
 * physical addresses because this runs before paging. Records the CPU
 * signature in the per-CPU ucode_cpu_info for load_microcode_amd().
 * NOTE(review): garbled extraction — the cp/path/use_pa declarations and
 * *ret assignment are missing from this view; kept byte-identical.
 */
475 static void __load_ucode_amd(unsigned int cpuid_1_eax
, struct cpio_data
*ret
)
477 struct ucode_cpu_info
*uci
;
482 if (IS_ENABLED(CONFIG_X86_32
)) {
483 uci
= (struct ucode_cpu_info
*)__pa_nodebug(ucode_cpu_info
);
484 path
= (const char *)__pa_nodebug(ucode_path
);
487 uci
= ucode_cpu_info
;
492 if (!get_builtin_microcode(&cp
, x86_family(cpuid_1_eax
)))
493 cp
= find_microcode_in_initrd(path
, use_pa
);
495 /* Needed in load_microcode_amd() */
496 uci
->cpu_sig
.sig
= cpuid_1_eax
;
/*
 * load_ucode_amd_bsp() - boot-CPU early load entry point: find the blob
 * and apply it, saving the patch (save_patch == true) for later AP loads.
 * NOTE(review): garbled extraction — the early-return body for an empty
 * cpio_data is missing from this view; kept byte-identical.
 */
501 void __init
load_ucode_amd_bsp(unsigned int cpuid_1_eax
)
503 struct cpio_data cp
= { };
505 __load_ucode_amd(cpuid_1_eax
, &cp
);
506 if (!(cp
.data
&& cp
.size
))
509 apply_microcode_early_amd(cpuid_1_eax
, cp
.data
, cp
.size
, true);
/*
 * load_ucode_amd_ap() - AP early load: first try the patch the BSP saved in
 * amd_ucode_patch (fast path, no blob re-scan); if no saved patch applies,
 * fall back to finding and applying from the blob without re-saving
 * (save_patch == false). 32-bit again goes through physical addresses.
 * NOTE(review): garbled extraction — the cp declaration, the return after
 * the fast path, and closing braces are missing from this view; kept
 * byte-identical.
 */
512 void load_ucode_amd_ap(unsigned int cpuid_1_eax
)
514 struct microcode_amd
*mc
;
516 u32
*new_rev
, rev
, dummy
;
518 if (IS_ENABLED(CONFIG_X86_32
)) {
519 mc
= (struct microcode_amd
*)__pa_nodebug(amd_ucode_patch
);
520 new_rev
= (u32
*)__pa_nodebug(&ucode_new_rev
);
522 mc
= (struct microcode_amd
*)amd_ucode_patch
;
523 new_rev
= &ucode_new_rev
;
526 native_rdmsr(MSR_AMD64_PATCH_LEVEL
, rev
, dummy
);
528 /* Check whether we have saved a new patch already: */
529 if (*new_rev
&& rev
< mc
->hdr
.patch_id
) {
530 if (!__apply_microcode_amd(mc
)) {
531 *new_rev
= mc
->hdr
.patch_id
;
536 __load_ucode_amd(cpuid_1_eax
, &cp
);
537 if (!(cp
.data
&& cp
.size
))
540 apply_microcode_early_amd(cpuid_1_eax
, cp
.data
, cp
.size
, false);
/* Forward declaration; defined further down in this file. */
543 static enum ucode_state
544 load_microcode_amd(bool save
, u8 family
, const u8
*data
, size_t size
);
/*
 * save_microcode_in_initrd_amd() - after the initrd is available, re-scan
 * it for this CPU's container and hand it to load_microcode_amd() with
 * save == true so the patches are copied into kernel heap before the
 * initrd is jettisoned.
 * NOTE(review): garbled extraction — the cp declaration and the return
 * statements are missing from this view; kept byte-identical.
 */
546 int __init
save_microcode_in_initrd_amd(unsigned int cpuid_1_eax
)
548 struct cont_desc desc
= { 0 };
549 enum ucode_state ret
;
552 cp
= find_microcode_in_initrd(ucode_path
, false);
553 if (!(cp
.data
&& cp
.size
))
556 desc
.cpuid_1_eax
= cpuid_1_eax
;
558 scan_containers(cp
.data
, cp
.size
, &desc
);
562 ret
= load_microcode_amd(true, x86_family(cpuid_1_eax
), desc
.data
, desc
.size
);
563 if (ret
> UCODE_UPDATED
)
/*
 * reload_ucode_amd() - re-apply the saved amd_ucode_patch (e.g. after
 * resume) when the current MSR_AMD64_PATCH_LEVEL is older than the saved
 * patch's ID; on success record and log the new revision.
 * NOTE(review): garbled extraction — rev/dummy declarations and closing
 * braces are missing from this view; kept byte-identical.
 */
569 void reload_ucode_amd(void)
571 struct microcode_amd
*mc
;
574 mc
= (struct microcode_amd
*)amd_ucode_patch
;
576 rdmsr(MSR_AMD64_PATCH_LEVEL
, rev
, dummy
);
578 if (rev
< mc
->hdr
.patch_id
) {
579 if (!__apply_microcode_amd(mc
)) {
580 ucode_new_rev
= mc
->hdr
.patch_id
;
581 pr_info("reload patch_level=0x%08x\n", ucode_new_rev
);
/*
 * __find_equiv_id() - per-CPU convenience wrapper: look up @cpu's cached
 * signature in the global equiv_table.
 * NOTE(review): garbled extraction (embedded line numbers, wrapped tokens);
 * kept byte-identical.
 */
585 static u16
__find_equiv_id(unsigned int cpu
)
587 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
588 return find_equiv_id(&equiv_table
, uci
->cpu_sig
.sig
);
/*
 * cache_find_patch() - return the cached ucode_patch whose equiv_cpu
 * matches, or NULL (the return statements are missing from this view).
 * NOTE(review): garbled extraction — "µcode_cache" below is mojibake for
 * "&microcode_cache" (the "&micro" prefix was HTML-entity-mangled); kept
 * byte-identical, restore before compiling.
 */
592 * a small, trivial cache of per-family ucode patches
594 static struct ucode_patch
*cache_find_patch(u16 equiv_cpu
)
596 struct ucode_patch
*p
;
598 list_for_each_entry(p
, µcode_cache
, plist
)
599 if (p
->equiv_cpu
== equiv_cpu
)
/*
 * update_cache() - insert @new_patch into the per-family patch cache: if an
 * entry with the same equiv_cpu exists, keep whichever patch_id is newer
 * (freeing the loser's data); otherwise append to the list tail.
 * NOTE(review): garbled extraction — "µcode_cache" is mojibake for
 * "&microcode_cache", and the kfree(new_patch)/return lines and closing
 * braces are missing from this view; kept byte-identical.
 */
604 static void update_cache(struct ucode_patch
*new_patch
)
606 struct ucode_patch
*p
;
608 list_for_each_entry(p
, µcode_cache
, plist
) {
609 if (p
->equiv_cpu
== new_patch
->equiv_cpu
) {
610 if (p
->patch_id
>= new_patch
->patch_id
) {
611 /* we already have the latest patch */
612 kfree(new_patch
->data
);
617 list_replace(&p
->plist
, &new_patch
->plist
);
623 /* no patch found, add it */
624 list_add_tail(&new_patch
->plist
, µcode_cache
);
/*
 * free_cache() - unlink and free every entry in the patch cache (the
 * kfree calls are missing from this view — confirm against upstream).
 * NOTE(review): garbled extraction — "µcode_cache" is mojibake for
 * "&microcode_cache"; kept byte-identical.
 */
627 static void free_cache(void)
629 struct ucode_patch
*p
, *tmp
;
631 list_for_each_entry_safe(p
, tmp
, µcode_cache
, plist
) {
632 __list_del(p
->plist
.prev
, p
->plist
.next
);
/*
 * find_patch() - resolve @cpu's equivalence ID and return the matching
 * cached patch (the NULL-on-no-equiv-id early return is missing from this
 * view).
 * NOTE(review): garbled extraction; kept byte-identical.
 */
638 static struct ucode_patch
*find_patch(unsigned int cpu
)
642 equiv_id
= __find_equiv_id(cpu
);
646 return cache_find_patch(equiv_id
);
/*
 * collect_cpu_info_amd() - fill @csig with the CPU's signature (CPUID leaf
 * 1 EAX) and current microcode revision; if a cached patch matches the
 * running revision, presumably uci->mc is set for mc_bp_resume() (that
 * assignment is missing from this view).
 * NOTE(review): garbled extraction — the find_patch() call and return are
 * missing; kept byte-identical.
 */
649 static int collect_cpu_info_amd(int cpu
, struct cpu_signature
*csig
)
651 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
652 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
653 struct ucode_patch
*p
;
655 csig
->sig
= cpuid_eax(0x00000001);
656 csig
->rev
= c
->microcode
;
659 * a patch could have been loaded early, set uci->mc so that
660 * mc_bp_resume() can call apply_microcode()
663 if (p
&& (p
->patch_id
== csig
->rev
))
666 pr_info("CPU%d: patch_level=0x%08x\n", cpu
, csig
->rev
);
/*
 * apply_microcode_amd() - driver-core callback: apply the cached patch for
 * @cpu (must run on @cpu itself — see BUG_ON). Skips application when the
 * running revision is already >= the patch ID, logs failure/success, and
 * propagates the new revision into uci->cpu_sig and boot_cpu_data (BSP).
 * NOTE(review): garbled extraction — the find_patch()/mc_amd setup,
 * rev/dummy declarations, ret assignments and returns are missing from
 * this view; kept byte-identical.
 */
671 static enum ucode_state
apply_microcode_amd(int cpu
)
673 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
674 struct microcode_amd
*mc_amd
;
675 struct ucode_cpu_info
*uci
;
676 struct ucode_patch
*p
;
677 enum ucode_state ret
;
680 BUG_ON(raw_smp_processor_id() != cpu
);
682 uci
= ucode_cpu_info
+ cpu
;
691 rdmsr(MSR_AMD64_PATCH_LEVEL
, rev
, dummy
);
693 /* need to apply patch? */
694 if (rev
>= mc_amd
->hdr
.patch_id
) {
699 if (__apply_microcode_amd(mc_amd
)) {
700 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
701 cpu
, mc_amd
->hdr
.patch_id
);
705 rev
= mc_amd
->hdr
.patch_id
;
708 pr_info("CPU%d: new patch_level=0x%08x\n", cpu
, rev
);
711 uci
->cpu_sig
.rev
= rev
;
714 /* Update boot_cpu_data's revision too, if we're on the BSP: */
715 if (c
->cpu_index
== boot_cpu_data
.cpu_index
)
716 boot_cpu_data
.microcode
= rev
;
/*
 * install_equiv_cpu_table() - copy the container's equivalence table into a
 * vmalloc'd global equiv_table and return the number of bytes consumed
 * (table length + container header).
 * NOTE(review): garbled extraction — hdr/equiv_tbl_len declarations and
 * error-path returns are missing from this view; kept byte-identical.
 */
721 static size_t install_equiv_cpu_table(const u8
*buf
, size_t buf_size
)
726 if (!verify_equivalence_table(buf
, buf_size
, false))
729 hdr
= (const u32
*)buf
;
730 equiv_tbl_len
= hdr
[2];
732 equiv_table
.entry
= vmalloc(equiv_tbl_len
);
733 if (!equiv_table
.entry
) {
734 pr_err("failed to allocate equivalent CPU table\n");
738 memcpy(equiv_table
.entry
, buf
+ CONTAINER_HDR_SZ
, equiv_tbl_len
);
739 equiv_table
.num_entries
= equiv_tbl_len
/ sizeof(struct equiv_cpu_entry
);
741 /* add header length */
742 return equiv_tbl_len
+ CONTAINER_HDR_SZ
;
/*
 * free_equiv_cpu_table() - release the vmalloc'd equivalence table and
 * zero the global descriptor so stale pointers cannot be reused.
 * NOTE(review): garbled extraction; kept byte-identical.
 */
745 static void free_equiv_cpu_table(void)
747 vfree(equiv_table
.entry
);
748 memset(&equiv_table
, 0, sizeof(equiv_table
));
/*
 * cleanup() - tear down loader state; frees the equivalence table (further
 * cleanup, if any, is missing from this view).
 */
751 static void cleanup(void)
753 free_equiv_cpu_table();
/*
 * verify_and_add_patch() - verify one patch section in @fw; on success,
 * allocate a ucode_patch, duplicate the patch body, fill in patch_id and
 * equiv_cpu from the header, and add it to the cache via update_cache()
 * (the update_cache() call itself is missing from this view). Negative
 * return signals a fatal error (allocation failure); non-negative lets
 * the caller skip to the next patch.
 * NOTE(review): garbled extraction — allocation-failure returns, proc_id/
 * ret declarations, and the final return are missing; kept byte-identical.
 */
758 * Return a non-negative value even if some of the checks failed so that
759 * we can skip over the next patch. If we return a negative value, we
760 * signal a grave error like a memory allocation has failed and the
761 * driver cannot continue functioning normally. In such cases, we tear
762 * down everything we've used up so far and exit.
764 static int verify_and_add_patch(u8 family
, u8
*fw
, unsigned int leftover
,
765 unsigned int *patch_size
)
767 struct microcode_header_amd
*mc_hdr
;
768 struct ucode_patch
*patch
;
772 ret
= verify_patch(family
, fw
, leftover
, patch_size
, false);
776 patch
= kzalloc(sizeof(*patch
), GFP_KERNEL
);
778 pr_err("Patch allocation failure.\n");
782 patch
->data
= kmemdup(fw
+ SECTION_HDR_SIZE
, *patch_size
, GFP_KERNEL
);
784 pr_err("Patch data allocation failure.\n");
789 mc_hdr
= (struct microcode_header_amd
*)(fw
+ SECTION_HDR_SIZE
);
790 proc_id
= mc_hdr
->processor_rev_id
;
792 INIT_LIST_HEAD(&patch
->plist
);
793 patch
->patch_id
= mc_hdr
->patch_id
;
794 patch
->equiv_cpu
= proc_id
;
796 pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
797 __func__
, patch
->patch_id
, proc_id
);
799 /* ... and add to cache. */
/*
 * __load_microcode_amd() - install the equivalence table from @data, then
 * iterate the patch sections, verifying and caching each via
 * verify_and_add_patch(), advancing by the reported patch size plus the
 * section header.
 * NOTE(review): garbled extraction — the size parameter, fw/offset/ret
 * declarations, the loop header, and error returns are missing from this
 * view; kept byte-identical.
 */
805 static enum ucode_state
__load_microcode_amd(u8 family
, const u8
*data
,
811 offset
= install_equiv_cpu_table(data
, size
);
818 if (*(u32
*)fw
!= UCODE_UCODE_TYPE
) {
819 pr_err("invalid type field in container file section header\n");
820 free_equiv_cpu_table();
825 unsigned int crnt_size
= 0;
828 ret
= verify_and_add_patch(family
, fw
, size
, &crnt_size
);
832 fw
+= crnt_size
+ SECTION_HDR_SIZE
;
833 size
-= (crnt_size
+ SECTION_HDR_SIZE
);
/*
 * load_microcode_amd() - rebuild loader state from a fresh blob: drop the
 * old equivalence table, parse/cache all patches, and (when @save and the
 * BSP's matching patch is found and differs from the running revision)
 * copy that patch into amd_ucode_patch for early AP/resume loads.
 * NOTE(review): garbled extraction — the find_patch() lookup, the @save
 * gating, error-path returns, and closing braces are missing from this
 * view; kept byte-identical.
 */
839 static enum ucode_state
840 load_microcode_amd(bool save
, u8 family
, const u8
*data
, size_t size
)
842 struct ucode_patch
*p
;
843 enum ucode_state ret
;
845 /* free old equiv table */
846 free_equiv_cpu_table();
848 ret
= __load_microcode_amd(family
, data
, size
);
849 if (ret
!= UCODE_OK
) {
858 if (boot_cpu_data
.microcode
== p
->patch_id
)
864 /* save BSP's matching patch for early load */
868 memset(amd_ucode_patch
, 0, PATCH_MAX_SIZE
);
869 memcpy(amd_ucode_patch
, p
->data
, min_t(u32
, ksize(p
->data
), PATCH_MAX_SIZE
));
875 * AMD microcode firmware naming convention, up to family 15h they are in
878 * amd-ucode/microcode_amd.bin
880 * This legacy file is always smaller than 2K in size.
882 * Beginning with family 15h, they are in family-specific firmware files:
884 * amd-ucode/microcode_amd_fam15h.bin
885 * amd-ucode/microcode_amd_fam16h.bin
888 * These might be larger than 2K.
/*
 * request_microcode_amd() - driver-core callback: on the boot CPU (and only
 * when a refresh is requested), request the firmware file — family-specific
 * name for newer families, legacy name otherwise — verify the container,
 * and load it. Returns UCODE_NFOUND when the file is absent.
 * NOTE(review): garbled extraction — the refresh_fw parameter, the family
 * condition gating the snprintf, goto labels/returns, and closing braces
 * are missing from this view; kept byte-identical.
 */
890 static enum ucode_state
request_microcode_amd(int cpu
, struct device
*device
,
893 char fw_name
[36] = "amd-ucode/microcode_amd.bin";
894 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
895 bool bsp
= c
->cpu_index
== boot_cpu_data
.cpu_index
;
896 enum ucode_state ret
= UCODE_NFOUND
;
897 const struct firmware
*fw
;
899 /* reload ucode container only on the boot cpu */
900 if (!refresh_fw
|| !bsp
)
904 snprintf(fw_name
, sizeof(fw_name
), "amd-ucode/microcode_amd_fam%.2xh.bin", c
->x86
);
906 if (request_firmware_direct(&fw
, (const char *)fw_name
, device
)) {
907 pr_debug("failed to load file %s\n", fw_name
);
912 if (!verify_container(fw
->data
, fw
->size
, false))
915 ret
= load_microcode_amd(bsp
, c
->x86
, fw
->data
, fw
->size
);
918 release_firmware(fw
);
/*
 * request_microcode_user() - user-buffer load callback; the body is missing
 * from this view (presumably a stub — confirm against upstream).
 */
924 static enum ucode_state
925 request_microcode_user(int cpu
, const void __user
*buf
, size_t size
)
/*
 * microcode_fini_cpu_amd() - per-CPU teardown; the statement(s) acting on
 * @uci are missing from this view (presumably clearing uci->mc — confirm
 * against upstream).
 */
930 static void microcode_fini_cpu_amd(int cpu
)
932 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
/*
 * Ops vector registered with the generic microcode driver core; wires the
 * AMD-specific callbacks defined above.
 * NOTE(review): garbled extraction — the closing "};" is missing from this
 * view; kept byte-identical.
 */
937 static struct microcode_ops microcode_amd_ops
= {
938 .request_microcode_user
= request_microcode_user
,
939 .request_microcode_fw
= request_microcode_amd
,
940 .collect_cpu_info
= collect_cpu_info_amd
,
941 .apply_microcode
= apply_microcode_amd
,
942 .microcode_fini_cpu
= microcode_fini_cpu_amd
,
/*
 * init_amd_microcode() - module init: reject non-AMD CPUs and families
 * below 0x10, log any early-applied revision, and hand the ops vector to
 * the driver core.
 * NOTE(review): garbled extraction — "µcode_amd_ops" is mojibake for
 * "&microcode_amd_ops", and the NULL-return/ucode_new_rev argument lines
 * are missing from this view; kept byte-identical, restore before
 * compiling.
 */
945 struct microcode_ops
* __init
init_amd_microcode(void)
947 struct cpuinfo_x86
*c
= &boot_cpu_data
;
949 if (c
->x86_vendor
!= X86_VENDOR_AMD
|| c
->x86
< 0x10) {
950 pr_warn("AMD CPU family 0x%x not supported\n", c
->x86
);
955 pr_info_once("microcode updated early to new patch_level=0x%08x\n",
958 return µcode_amd_ops
;
961 void __exit
exit_amd_microcode(void)