// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *                    2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                    H Peter Anvin <hpa@zytor.com>
 */
#define pr_fmt(fmt) "microcode: " fmt
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

#include "internal.h"

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL)

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *ucode_patch_va __read_mostly;
static struct microcode_intel *ucode_patch_late __read_mostly;

/* last level cache size per core */
static unsigned int llc_size_per_core __ro_after_init;

/* microcode format is extended from Prescott processors */
struct extended_signature {
        unsigned int    sig;
        unsigned int    pf;
        unsigned int    cksum;
};

struct extended_sigtable {
        unsigned int    count;
        unsigned int    cksum;
        unsigned int    reserved[3];
        struct extended_signature sigs[];
};

#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
#define EXT_HEADER_SIZE         (sizeof(struct extended_sigtable))
#define EXT_SIGNATURE_SIZE      (sizeof(struct extended_signature))
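
/*
 * A zero datasize in the header denotes the original fixed-size update
 * layout (DEFAULT_UCODE_DATASIZE bytes of data plus the header); only in
 * that case is the totalsize field unused.
 */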
static inline unsigned int get_totalsize(struct microcode_header_intel *hdr)
{
        return hdr->datasize ? hdr->totalsize : DEFAULT_UCODE_TOTALSIZE;
}

static inline unsigned int exttable_size(struct extended_sigtable *et)
{
        return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
}
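
/*
 * Fill @sig with the boot signature from CPUID(1), the platform flags from
 * MSR_IA32_PLATFORM_ID and the currently loaded microcode revision.
 */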
void intel_collect_cpu_info(struct cpu_signature *sig)
{
        sig->sig = cpuid_eax(1);
        sig->pf  = 0;
        sig->rev = intel_get_microcode_revision();

        if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) {
                unsigned int val[2];

                /* get processor flags from MSR 0x17 */
                native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                sig->pf = 1 << ((val[1] >> 18) & 7);
        }
}
EXPORT_SYMBOL_GPL(intel_collect_cpu_info);

static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
                                        unsigned int pf2)
{
        if (s1->sig != sig2)
                return false;

        /* Processor flags are either both 0 or they intersect. */
        return ((!s1->pf && !pf2) || (s1->pf & pf2));
}
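
/*
 * Return true if the update blob @mc is applicable to a CPU with signature
 * @sig, matching either the base header or an entry in the optional
 * extended signature table.
 */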
bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
{
        struct microcode_header_intel *mc_hdr = mc;
        struct extended_signature *ext_sig;
        struct extended_sigtable *ext_hdr;
        int i;

        if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
                return true;

        /* Look for ext. headers: */
        if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
                return false;

        ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
        ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

        for (i = 0; i < ext_hdr->count; i++) {
                if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
                        return true;
                ext_sig++;
        }
        return false;
}
EXPORT_SYMBOL_GPL(intel_find_matching_signature);

/**
 * intel_microcode_sanity_check() - Sanity check microcode file.
 * @mc: Pointer to the microcode file contents.
 * @print_err: Display failure reason if true, silent if false.
 * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
 *            Validate if the microcode header type matches with the type
 *            specified here.
 *
 * Validate certain header fields and verify if computed checksum matches
 * with the one specified in the header.
 *
 * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
 *         fail.
 */
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
{
        unsigned long total_size, data_size, ext_table_size;
        struct microcode_header_intel *mc_header = mc;
        struct extended_sigtable *ext_header = NULL;
        u32 sum, orig_sum, ext_sigcount = 0, i;
        struct extended_signature *ext_sig;

        total_size = get_totalsize(mc_header);
        data_size = intel_microcode_get_datasize(mc_header);

        if (data_size + MC_HEADER_SIZE > total_size) {
                if (print_err)
                        pr_err("Error: bad microcode data file size.\n");
                return -EINVAL;
        }

        if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
156 pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
161 ext_table_size
= total_size
- (MC_HEADER_SIZE
+ data_size
);
162 if (ext_table_size
) {
163 u32 ext_table_sum
= 0;
166 if (ext_table_size
< EXT_HEADER_SIZE
||
167 ((ext_table_size
- EXT_HEADER_SIZE
) % EXT_SIGNATURE_SIZE
)) {
169 pr_err("Error: truncated extended signature table.\n");
173 ext_header
= mc
+ MC_HEADER_SIZE
+ data_size
;
174 if (ext_table_size
!= exttable_size(ext_header
)) {
176 pr_err("Error: extended signature table size mismatch.\n");
180 ext_sigcount
= ext_header
->count
;
183 * Check extended table checksum: the sum of all dwords that
184 * comprise a valid table must be 0.
186 ext_tablep
= (u32
*)ext_header
;
188 i
= ext_table_size
/ sizeof(u32
);
190 ext_table_sum
+= ext_tablep
[i
];
194 pr_warn("Bad extended signature table checksum, aborting.\n");

        /*
         * Calculate the checksum of update data and header. The checksum of
         * valid update data and header including the extended signature table
         * must be 0.
         */
        orig_sum = 0;
        i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
        while (i--)
                orig_sum += ((u32 *)mc)[i];

        if (orig_sum) {
                if (print_err)
                        pr_err("Bad microcode data checksum, aborting.\n");
                return -EINVAL;
        }

        if (!ext_table_size)
                return 0;

        /*
         * Check extended signature checksum: 0 => valid.
         */
        for (i = 0; i < ext_sigcount; i++) {
                ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
                          EXT_SIGNATURE_SIZE * i;

                sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
                      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
                if (sum) {
                        if (print_err)
                                pr_err("Bad extended signature checksum, aborting.\n");
                        return -EINVAL;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
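
/* Free the previously saved patch and publish the new one for early AP loading. */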
static void update_ucode_pointer(struct microcode_intel *mc)
{
        kvfree(ucode_patch_va);

        /*
         * Save the virtual address for early loading and for eventual free
         * on late loading.
         */
        ucode_patch_va = mc;
}

static void save_microcode_patch(struct microcode_intel *patch)
{
        unsigned int size = get_totalsize(&patch->hdr);
        struct microcode_intel *mc;

        mc = kvmemdup(patch, size, GFP_KERNEL);
        if (mc)
                update_ucode_pointer(mc);
        else
                pr_err("Unable to allocate microcode memory size: %u\n", size);
}

/* Scan blob for microcode matching the boot CPUs family, model, stepping */
static __init struct microcode_intel *scan_microcode(void *data, size_t size,
                                                     struct ucode_cpu_info *uci,
                                                     bool save)
{
        struct microcode_header_intel *mc_header;
        struct microcode_intel *patch = NULL;
        u32 cur_rev = uci->cpu_sig.rev;
        unsigned int mc_size;

        for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
                mc_header = (struct microcode_header_intel *)data;

                mc_size = get_totalsize(mc_header);
                if (!mc_size || mc_size > size ||
                    intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
                        break;

                if (!intel_find_matching_signature(data, &uci->cpu_sig))
                        continue;

                /*
                 * For saving the early microcode, find the matching revision which
                 * was loaded on the BSP.
                 *
                 * On the BSP during early boot, find a newer revision than
                 * actually loaded in the CPU.
                 */
                if (save) {
                        if (cur_rev != mc_header->rev)
                                continue;
                } else if (cur_rev >= mc_header->rev) {
                        continue;
                }

                patch = data;
                cur_rev = mc_header->rev;
        }

        return size ? NULL : patch;
}
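
/*
 * Core of the update: write the patch data via MSR_IA32_UCODE_WRITE and
 * confirm that the CPU reports the expected new revision afterwards.
 */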
static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
                                          struct microcode_intel *mc,
                                          u32 *cur_rev)
{
        u32 rev;

        if (!mc)
                return UCODE_NFOUND;

        /*
         * Save us the MSR write below - which is a particularly expensive
         * operation - when the other hyperthread has updated the microcode
         * already.
         */
        *cur_rev = intel_get_microcode_revision();
        if (*cur_rev >= mc->hdr.rev) {
                uci->cpu_sig.rev = *cur_rev;
                return UCODE_OK;
        }

        /*
         * Writeback and invalidate caches before updating microcode to avoid
         * internal issues depending on what the microcode is updating.
         */
        native_wbinvd();

        /* write microcode via MSR 0x79 */
        native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

        rev = intel_get_microcode_revision();
        if (rev != mc->hdr.rev)
                return UCODE_ERROR;

        uci->cpu_sig.rev = rev;
        return UCODE_UPDATED;
}

static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
{
        struct microcode_intel *mc = uci->mc;
        u32 cur_rev;

        return __apply_microcode(uci, mc, &cur_rev);
}
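
/*
 * Builtin firmware blobs are named "intel-ucode/<family>-<model>-<stepping>"
 * in two-digit hex, e.g. family 6, model 0x8e, stepping 0xa yields
 * "intel-ucode/06-8e-0a". Only used on 64-bit kernels.
 */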
static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
{
        unsigned int eax = 1, ebx, ecx = 0, edx;
        struct firmware fw;
        char name[30];

        if (IS_ENABLED(CONFIG_X86_32))
                return false;

        native_cpuid(&eax, &ebx, &ecx, &edx);

        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                x86_family(eax), x86_model(eax), x86_stepping(eax));

        if (firmware_request_builtin(&fw, name)) {
                cp->size = fw.size;
                cp->data = (void *)fw.data;
                return true;
        }
        return false;
}

static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
{
        struct cpio_data cp;

        intel_collect_cpu_info(&uci->cpu_sig);

        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(ucode_path);

        if (!(cp.data && cp.size))
                return NULL;

        return scan_microcode(cp.data, cp.size, uci, save);
}

/*
 * Invoked from an early init call to save the microcode blob which was
 * selected during early boot when mm was not usable. The microcode must be
 * saved because initrd is going away. It's an early init call so the APs
 * just can use the pointer and do not have to scan initrd/builtin firmware
 * again.
 */
static int __init save_builtin_microcode(void)
{
        struct ucode_cpu_info uci;

        if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
                return 0;

        if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return 0;

        uci.mc = get_microcode_blob(&uci, true);
        if (uci.mc)
                save_microcode_patch(uci.mc);
        return 0;
}
early_initcall(save_builtin_microcode);

/* Load microcode on BSP from initrd or builtin blobs */
void __init load_ucode_intel_bsp(struct early_load_data *ed)
{
        struct ucode_cpu_info uci;

        uci.mc = get_microcode_blob(&uci, false);
        ed->old_rev = uci.cpu_sig.rev;

        if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) {
                ucode_patch_va = UCODE_BSP_LOADED;
                ed->new_rev = uci.cpu_sig.rev;
        }
}
void load_ucode_intel_ap(void)
{
        struct ucode_cpu_info uci;

        uci.mc = ucode_patch_va;
        if (uci.mc)
                apply_microcode_early(&uci);
}

/* Reload microcode on resume */
void reload_ucode_intel(void)
{
        struct ucode_cpu_info uci = { .mc = ucode_patch_va, };

        if (uci.mc)
                apply_microcode_early(&uci);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
        intel_collect_cpu_info(csig);
        return 0;
}

static enum ucode_state apply_microcode_late(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct microcode_intel *mc = ucode_patch_late;
        enum ucode_state ret;
        u32 cur_rev;

        if (WARN_ON_ONCE(smp_processor_id() != cpu))
                return UCODE_ERROR;

        ret = __apply_microcode(uci, mc, &cur_rev);
        if (ret != UCODE_UPDATED && ret != UCODE_OK)
                return ret;

        cpu_data(cpu).microcode = uci->cpu_sig.rev;
        if (!cpu)
                boot_cpu_data.microcode = uci->cpu_sig.rev;

        return ret;
}
static bool ucode_validate_minrev(struct microcode_header_intel *mc_header)
{
        int cur_rev = boot_cpu_data.microcode;

        /*
         * When late-loading, ensure the header declares a minimum revision
         * required to perform a late-load. The previously reserved field
         * is 0 in older microcode blobs.
         */
        if (!mc_header->min_req_ver) {
                pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n");
                return false;
        }

        /*
         * Check whether the current revision is either greater than or equal
         * to the minimum revision specified in the header.
         */
        if (cur_rev < mc_header->min_req_ver) {
                pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev);
                pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver);
                return false;
        }
        return true;
}
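
/*
 * Walk every update blob in @iter and stage the newest one that matches this
 * CPU and passes the sanity checks in ucode_patch_late for the late load.
 */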
static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        bool is_safe, new_is_safe = false;
        int cur_rev = uci->cpu_sig.rev;
        unsigned int curr_mc_size = 0;
        u8 *new_mc = NULL, *mc = NULL;

        while (iov_iter_count(iter)) {
                struct microcode_header_intel mc_header;
                unsigned int mc_size, data_size;
                u8 *data;

                if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
                        pr_err("error! Truncated or inaccessible header in microcode data file\n");
                        goto fail;
                }

                mc_size = get_totalsize(&mc_header);
                if (mc_size < sizeof(mc_header)) {
                        pr_err("error! Bad data in microcode data file (totalsize too small)\n");
                        goto fail;
                }
                data_size = mc_size - sizeof(mc_header);
                if (data_size > iov_iter_count(iter)) {
                        pr_err("error! Bad data in microcode data file (truncated file?)\n");
                        goto fail;
                }

                /* For performance reasons, reuse mc area when possible */
                if (!mc || mc_size > curr_mc_size) {
                        kvfree(mc);
                        mc = kvmalloc(mc_size, GFP_KERNEL);
                        if (!mc)
                                goto fail;
                        curr_mc_size = mc_size;
                }

                memcpy(mc, &mc_header, sizeof(mc_header));
                data = mc + sizeof(mc_header);
                if (!copy_from_iter_full(data, data_size, iter) ||
                    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
                        goto fail;

                if (cur_rev >= mc_header.rev)
                        continue;

                if (!intel_find_matching_signature(mc, &uci->cpu_sig))
                        continue;

                is_safe = ucode_validate_minrev(&mc_header);
                if (force_minrev && !is_safe)
                        continue;

                /* Keep the newest matching blob and hand ownership of @mc to it. */
                kvfree(new_mc);
                cur_rev = mc_header.rev;
                new_mc  = mc;
                new_is_safe = is_safe;
                mc = NULL;
        }

        if (iov_iter_count(iter))
                goto fail;

        kvfree(mc);
        if (!new_mc)
                return UCODE_NFOUND;

        ucode_patch_late = (struct microcode_intel *)new_mc;
        return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW;

fail:
        kvfree(mc);
        kvfree(new_mc);
        return UCODE_ERROR;
}

static bool is_blacklisted(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        /*
         * Late loading on model 79 with microcode revision less than 0x0b000021
         * and LLC size per core bigger than 2.5MB may result in a system hang.
         * This behavior is documented in item BDF90, #334165 (Intel Xeon
         * Processor E7-8800/4800 v4 Product Family).
         */
        if (c->x86_vfm == INTEL_BROADWELL_X &&
            c->x86_stepping == 0x01 &&
            llc_size_per_core > 2621440 &&
            c->microcode < 0x0b000021) {
                pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
                pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
                return true;
        }

        return false;
}
static enum ucode_state request_microcode_fw(int cpu, struct device *device)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        const struct firmware *firmware;
        struct iov_iter iter;
        enum ucode_state ret;
        struct kvec kvec;
        char name[30];

        if (is_blacklisted(cpu))
                return UCODE_NFOUND;

        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                c->x86, c->x86_model, c->x86_stepping);

        if (request_firmware_direct(&firmware, name, device)) {
                pr_debug("data file %s load failed\n", name);
                return UCODE_NFOUND;
        }

        kvec.iov_base = (void *)firmware->data;
        kvec.iov_len = firmware->size;
        iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
        ret = parse_microcode_blobs(cpu, &iter);

        release_firmware(firmware);

        return ret;
}
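
/* Commit the staged patch on success, otherwise discard it. */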
static void finalize_late_load(int result)
{
        if (!result)
                update_ucode_pointer(ucode_patch_late);
        else
                kvfree(ucode_patch_late);
        ucode_patch_late = NULL;
}

static struct microcode_ops microcode_intel_ops = {
        .request_microcode_fw   = request_microcode_fw,
        .collect_cpu_info       = collect_cpu_info,
        .apply_microcode        = apply_microcode_late,
        .finalize_late_load     = finalize_late_load,
        .use_nmi                = IS_ENABLED(CONFIG_X86_64),
};
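
/* LLC size share per core; consumed by the erratum BDF90 check above. */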
static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
        u64 llc_size = c->x86_cache_size * 1024ULL;

        do_div(llc_size, topology_num_cores_per_package());
        llc_size_per_core = (unsigned int)llc_size;
}

struct microcode_ops * __init init_intel_microcode(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
            cpu_has(c, X86_FEATURE_IA64)) {
                pr_err("Intel CPU family 0x%x not supported\n", c->x86);
                return NULL;
        }

        calc_llc_size_per_core(c);

        return &microcode_intel_ops;
}