/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *               2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                    H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 */
24 #define pr_fmt(fmt) "microcode: " fmt
26 #include <linux/earlycpio.h>
27 #include <linux/firmware.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/initrd.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/cpu.h>
36 #include <asm/microcode_intel.h>
37 #include <asm/intel-family.h>
38 #include <asm/processor.h>
39 #include <asm/tlbflush.h>
40 #include <asm/setup.h>
/* Path of the microcode blob inside the builtin/initrd firmware cpio. */
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;
/*
 * Check whether CPU signature/processor-flags pair (s1, p1) matches the
 * pair (s2, p2) from a microcode header. The signatures must be identical;
 * the platform flag masks match when both are zero or when they share a bit.
 */
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Processor flags are either both 0 ... */
	if (!p1 && !p2)
		return true;

	/* ... or they intersect. */
	return p1 & p2;
}
66 * Returns 1 if update has been found, 0 otherwise.
68 static int find_matching_signature(void *mc
, unsigned int csig
, int cpf
)
70 struct microcode_header_intel
*mc_hdr
= mc
;
71 struct extended_sigtable
*ext_hdr
;
72 struct extended_signature
*ext_sig
;
75 if (cpu_signatures_match(csig
, cpf
, mc_hdr
->sig
, mc_hdr
->pf
))
78 /* Look for ext. headers: */
79 if (get_totalsize(mc_hdr
) <= get_datasize(mc_hdr
) + MC_HEADER_SIZE
)
82 ext_hdr
= mc
+ get_datasize(mc_hdr
) + MC_HEADER_SIZE
;
83 ext_sig
= (void *)ext_hdr
+ EXT_HEADER_SIZE
;
85 for (i
= 0; i
< ext_hdr
->count
; i
++) {
86 if (cpu_signatures_match(csig
, cpf
, ext_sig
->sig
, ext_sig
->pf
))
94 * Returns 1 if update has been found, 0 otherwise.
96 static int has_newer_microcode(void *mc
, unsigned int csig
, int cpf
, int new_rev
)
98 struct microcode_header_intel
*mc_hdr
= mc
;
100 if (mc_hdr
->rev
<= new_rev
)
103 return find_matching_signature(mc
, csig
, cpf
);
107 * Given CPU signature and a microcode patch, this function finds if the
108 * microcode patch has matching family and model with the CPU.
110 * %true - if there's a match
113 static bool microcode_matches(struct microcode_header_intel
*mc_header
,
116 unsigned long total_size
= get_totalsize(mc_header
);
117 unsigned long data_size
= get_datasize(mc_header
);
118 struct extended_sigtable
*ext_header
;
119 unsigned int fam_ucode
, model_ucode
;
120 struct extended_signature
*ext_sig
;
121 unsigned int fam
, model
;
124 fam
= x86_family(sig
);
125 model
= x86_model(sig
);
127 fam_ucode
= x86_family(mc_header
->sig
);
128 model_ucode
= x86_model(mc_header
->sig
);
130 if (fam
== fam_ucode
&& model
== model_ucode
)
133 /* Look for ext. headers: */
134 if (total_size
<= data_size
+ MC_HEADER_SIZE
)
137 ext_header
= (void *) mc_header
+ data_size
+ MC_HEADER_SIZE
;
138 ext_sig
= (void *)ext_header
+ EXT_HEADER_SIZE
;
139 ext_sigcount
= ext_header
->count
;
141 for (i
= 0; i
< ext_sigcount
; i
++) {
142 fam_ucode
= x86_family(ext_sig
->sig
);
143 model_ucode
= x86_model(ext_sig
->sig
);
145 if (fam
== fam_ucode
&& model
== model_ucode
)
153 static struct ucode_patch
*memdup_patch(void *data
, unsigned int size
)
155 struct ucode_patch
*p
;
157 p
= kzalloc(sizeof(struct ucode_patch
), GFP_KERNEL
);
161 p
->data
= kmemdup(data
, size
, GFP_KERNEL
);
170 static void save_microcode_patch(void *data
, unsigned int size
)
172 struct microcode_header_intel
*mc_hdr
, *mc_saved_hdr
;
173 struct ucode_patch
*iter
, *tmp
, *p
= NULL
;
174 bool prev_found
= false;
175 unsigned int sig
, pf
;
177 mc_hdr
= (struct microcode_header_intel
*)data
;
179 list_for_each_entry_safe(iter
, tmp
, µcode_cache
, plist
) {
180 mc_saved_hdr
= (struct microcode_header_intel
*)iter
->data
;
181 sig
= mc_saved_hdr
->sig
;
182 pf
= mc_saved_hdr
->pf
;
184 if (find_matching_signature(data
, sig
, pf
)) {
187 if (mc_hdr
->rev
<= mc_saved_hdr
->rev
)
190 p
= memdup_patch(data
, size
);
192 pr_err("Error allocating buffer %p\n", data
);
194 list_replace(&iter
->plist
, &p
->plist
);
202 * There weren't any previous patches found in the list cache; save the
206 p
= memdup_patch(data
, size
);
208 pr_err("Error allocating buffer for %p\n", data
);
210 list_add_tail(&p
->plist
, µcode_cache
);
217 * Save for early loading. On 32-bit, that needs to be a physical
218 * address as the APs are running from physical addresses, before
219 * paging has been enabled.
221 if (IS_ENABLED(CONFIG_X86_32
))
222 intel_ucode_patch
= (struct microcode_intel
*)__pa_nodebug(p
->data
);
224 intel_ucode_patch
= p
->data
;
227 static int microcode_sanity_check(void *mc
, int print_err
)
229 unsigned long total_size
, data_size
, ext_table_size
;
230 struct microcode_header_intel
*mc_header
= mc
;
231 struct extended_sigtable
*ext_header
= NULL
;
232 u32 sum
, orig_sum
, ext_sigcount
= 0, i
;
233 struct extended_signature
*ext_sig
;
235 total_size
= get_totalsize(mc_header
);
236 data_size
= get_datasize(mc_header
);
238 if (data_size
+ MC_HEADER_SIZE
> total_size
) {
240 pr_err("Error: bad microcode data file size.\n");
244 if (mc_header
->ldrver
!= 1 || mc_header
->hdrver
!= 1) {
246 pr_err("Error: invalid/unknown microcode update format.\n");
250 ext_table_size
= total_size
- (MC_HEADER_SIZE
+ data_size
);
251 if (ext_table_size
) {
252 u32 ext_table_sum
= 0;
255 if ((ext_table_size
< EXT_HEADER_SIZE
)
256 || ((ext_table_size
- EXT_HEADER_SIZE
) % EXT_SIGNATURE_SIZE
)) {
258 pr_err("Error: truncated extended signature table.\n");
262 ext_header
= mc
+ MC_HEADER_SIZE
+ data_size
;
263 if (ext_table_size
!= exttable_size(ext_header
)) {
265 pr_err("Error: extended signature table size mismatch.\n");
269 ext_sigcount
= ext_header
->count
;
272 * Check extended table checksum: the sum of all dwords that
273 * comprise a valid table must be 0.
275 ext_tablep
= (u32
*)ext_header
;
277 i
= ext_table_size
/ sizeof(u32
);
279 ext_table_sum
+= ext_tablep
[i
];
283 pr_warn("Bad extended signature table checksum, aborting.\n");
289 * Calculate the checksum of update data and header. The checksum of
290 * valid update data and header including the extended signature table
294 i
= (MC_HEADER_SIZE
+ data_size
) / sizeof(u32
);
296 orig_sum
+= ((u32
*)mc
)[i
];
300 pr_err("Bad microcode data checksum, aborting.\n");
308 * Check extended signature checksum: 0 => valid.
310 for (i
= 0; i
< ext_sigcount
; i
++) {
311 ext_sig
= (void *)ext_header
+ EXT_HEADER_SIZE
+
312 EXT_SIGNATURE_SIZE
* i
;
314 sum
= (mc_header
->sig
+ mc_header
->pf
+ mc_header
->cksum
) -
315 (ext_sig
->sig
+ ext_sig
->pf
+ ext_sig
->cksum
);
318 pr_err("Bad extended signature checksum, aborting.\n");
326 * Get microcode matching with BSP's model. Only CPUs with the same model as
327 * BSP can stay in the platform.
329 static struct microcode_intel
*
330 scan_microcode(void *data
, size_t size
, struct ucode_cpu_info
*uci
, bool save
)
332 struct microcode_header_intel
*mc_header
;
333 struct microcode_intel
*patch
= NULL
;
334 unsigned int mc_size
;
337 if (size
< sizeof(struct microcode_header_intel
))
340 mc_header
= (struct microcode_header_intel
*)data
;
342 mc_size
= get_totalsize(mc_header
);
345 microcode_sanity_check(data
, 0) < 0)
350 if (!microcode_matches(mc_header
, uci
->cpu_sig
.sig
)) {
356 save_microcode_patch(data
, mc_size
);
362 if (!has_newer_microcode(data
,
369 struct microcode_header_intel
*phdr
= &patch
->hdr
;
371 if (!has_newer_microcode(data
,
378 /* We have a newer patch, save it. */
391 static int collect_cpu_info_early(struct ucode_cpu_info
*uci
)
394 unsigned int family
, model
;
395 struct cpu_signature csig
= { 0 };
396 unsigned int eax
, ebx
, ecx
, edx
;
398 memset(uci
, 0, sizeof(*uci
));
402 native_cpuid(&eax
, &ebx
, &ecx
, &edx
);
405 family
= x86_family(eax
);
406 model
= x86_model(eax
);
408 if ((model
>= 5) || (family
> 6)) {
409 /* get processor flags from MSR 0x17 */
410 native_rdmsr(MSR_IA32_PLATFORM_ID
, val
[0], val
[1]);
411 csig
.pf
= 1 << ((val
[1] >> 18) & 7);
414 csig
.rev
= intel_get_microcode_revision();
/*
 * Dump the contents of the microcode patch cache via pr_debug().
 * Compiled out entirely unless DEBUG is defined for this file.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	/* check current patch for all CPUs */
	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf  = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;

		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
486 * Save this microcode patch. It will be loaded early when a CPU is
487 * hot-added or resumes.
489 static void save_mc_for_early(u8
*mc
, unsigned int size
)
491 /* Synchronization during CPU hotplug. */
492 static DEFINE_MUTEX(x86_cpu_microcode_mutex
);
494 mutex_lock(&x86_cpu_microcode_mutex
);
496 save_microcode_patch(mc
, size
);
499 mutex_unlock(&x86_cpu_microcode_mutex
);
502 static bool load_builtin_intel_microcode(struct cpio_data
*cp
)
504 unsigned int eax
= 1, ebx
, ecx
= 0, edx
;
507 if (IS_ENABLED(CONFIG_X86_32
))
510 native_cpuid(&eax
, &ebx
, &ecx
, &edx
);
512 sprintf(name
, "intel-ucode/%02x-%02x-%02x",
513 x86_family(eax
), x86_model(eax
), x86_stepping(eax
));
515 return get_builtin_firmware(cp
, name
);
519 * Print ucode update info.
522 print_ucode_info(struct ucode_cpu_info
*uci
, unsigned int date
)
524 pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
528 (date
>> 16) & 0xff);
/*
 * State for the delayed "updated early" printout: set while printk() is not
 * yet usable, consumed by show_ucode_info_early().
 * NOTE(review): upstream wraps these in #ifdef CONFIG_X86_32; the guard
 * lines are not visible in this chunk — confirm against the full file.
 */
static int delay_ucode_info;
static int current_mc_date;
537 * Print early updated ucode info after printk works. This is delayed info dump.
539 void show_ucode_info_early(void)
541 struct ucode_cpu_info uci
;
543 if (delay_ucode_info
) {
544 collect_cpu_info_early(&uci
);
545 print_ucode_info(&uci
, current_mc_date
);
546 delay_ucode_info
= 0;
551 * At this point, we can not call printk() yet. Delay printing microcode info in
552 * show_ucode_info_early() until printk() works.
554 static void print_ucode(struct ucode_cpu_info
*uci
)
556 struct microcode_intel
*mc
;
557 int *delay_ucode_info_p
;
558 int *current_mc_date_p
;
564 delay_ucode_info_p
= (int *)__pa_nodebug(&delay_ucode_info
);
565 current_mc_date_p
= (int *)__pa_nodebug(¤t_mc_date
);
567 *delay_ucode_info_p
= 1;
568 *current_mc_date_p
= mc
->hdr
.date
;
572 static inline void print_ucode(struct ucode_cpu_info
*uci
)
574 struct microcode_intel
*mc
;
580 print_ucode_info(uci
, mc
->hdr
.date
);
584 static int apply_microcode_early(struct ucode_cpu_info
*uci
, bool early
)
586 struct microcode_intel
*mc
;
594 * Save us the MSR write below - which is a particular expensive
595 * operation - when the other hyperthread has updated the microcode
598 rev
= intel_get_microcode_revision();
599 if (rev
>= mc
->hdr
.rev
) {
600 uci
->cpu_sig
.rev
= rev
;
605 * Writeback and invalidate caches before updating microcode to avoid
606 * internal issues depending on what the microcode is updating.
610 /* write microcode via MSR 0x79 */
611 native_wrmsrl(MSR_IA32_UCODE_WRITE
, (unsigned long)mc
->bits
);
613 rev
= intel_get_microcode_revision();
614 if (rev
!= mc
->hdr
.rev
)
617 uci
->cpu_sig
.rev
= rev
;
622 print_ucode_info(uci
, mc
->hdr
.date
);
627 int __init
save_microcode_in_initrd_intel(void)
629 struct ucode_cpu_info uci
;
633 * initrd is going away, clear patch ptr. We will scan the microcode one
634 * last time before jettisoning and save a patch, if found. Then we will
635 * update that pointer too, with a stable patch address to use when
636 * resuming the cores.
638 intel_ucode_patch
= NULL
;
640 if (!load_builtin_intel_microcode(&cp
))
641 cp
= find_microcode_in_initrd(ucode_path
, false);
643 if (!(cp
.data
&& cp
.size
))
646 collect_cpu_info_early(&uci
);
648 scan_microcode(cp
.data
, cp
.size
, &uci
, true);
656 * @res_patch, output: a pointer to the patch we found.
658 static struct microcode_intel
*__load_ucode_intel(struct ucode_cpu_info
*uci
)
660 static const char *path
;
664 if (IS_ENABLED(CONFIG_X86_32
)) {
665 path
= (const char *)__pa_nodebug(ucode_path
);
672 /* try built-in microcode first */
673 if (!load_builtin_intel_microcode(&cp
))
674 cp
= find_microcode_in_initrd(path
, use_pa
);
676 if (!(cp
.data
&& cp
.size
))
679 collect_cpu_info_early(uci
);
681 return scan_microcode(cp
.data
, cp
.size
, uci
, false);
684 void __init
load_ucode_intel_bsp(void)
686 struct microcode_intel
*patch
;
687 struct ucode_cpu_info uci
;
689 patch
= __load_ucode_intel(&uci
);
695 apply_microcode_early(&uci
, true);
698 void load_ucode_intel_ap(void)
700 struct microcode_intel
*patch
, **iup
;
701 struct ucode_cpu_info uci
;
703 if (IS_ENABLED(CONFIG_X86_32
))
704 iup
= (struct microcode_intel
**) __pa_nodebug(&intel_ucode_patch
);
706 iup
= &intel_ucode_patch
;
710 patch
= __load_ucode_intel(&uci
);
719 if (apply_microcode_early(&uci
, true)) {
720 /* Mixed-silicon system? Try to refetch the proper patch: */
727 static struct microcode_intel
*find_patch(struct ucode_cpu_info
*uci
)
729 struct microcode_header_intel
*phdr
;
730 struct ucode_patch
*iter
, *tmp
;
732 list_for_each_entry_safe(iter
, tmp
, µcode_cache
, plist
) {
734 phdr
= (struct microcode_header_intel
*)iter
->data
;
736 if (phdr
->rev
<= uci
->cpu_sig
.rev
)
739 if (!find_matching_signature(phdr
,
749 void reload_ucode_intel(void)
751 struct microcode_intel
*p
;
752 struct ucode_cpu_info uci
;
754 collect_cpu_info_early(&uci
);
756 p
= find_patch(&uci
);
762 apply_microcode_early(&uci
, false);
765 static int collect_cpu_info(int cpu_num
, struct cpu_signature
*csig
)
767 static struct cpu_signature prev
;
768 struct cpuinfo_x86
*c
= &cpu_data(cpu_num
);
771 memset(csig
, 0, sizeof(*csig
));
773 csig
->sig
= cpuid_eax(0x00000001);
775 if ((c
->x86_model
>= 5) || (c
->x86
> 6)) {
776 /* get processor flags from MSR 0x17 */
777 rdmsr(MSR_IA32_PLATFORM_ID
, val
[0], val
[1]);
778 csig
->pf
= 1 << ((val
[1] >> 18) & 7);
781 csig
->rev
= c
->microcode
;
783 /* No extra locking on prev, races are harmless. */
784 if (csig
->sig
!= prev
.sig
|| csig
->pf
!= prev
.pf
|| csig
->rev
!= prev
.rev
) {
785 pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
786 csig
->sig
, csig
->pf
, csig
->rev
);
793 static enum ucode_state
apply_microcode_intel(int cpu
)
795 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
796 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
797 struct microcode_intel
*mc
;
798 enum ucode_state ret
;
802 /* We should bind the task to the CPU */
803 if (WARN_ON(raw_smp_processor_id() != cpu
))
806 /* Look for a newer patch in our cache: */
807 mc
= find_patch(uci
);
815 * Save us the MSR write below - which is a particular expensive
816 * operation - when the other hyperthread has updated the microcode
819 rev
= intel_get_microcode_revision();
820 if (rev
>= mc
->hdr
.rev
) {
826 * Writeback and invalidate caches before updating microcode to avoid
827 * internal issues depending on what the microcode is updating.
831 /* write microcode via MSR 0x79 */
832 wrmsrl(MSR_IA32_UCODE_WRITE
, (unsigned long)mc
->bits
);
834 rev
= intel_get_microcode_revision();
836 if (rev
!= mc
->hdr
.rev
) {
837 pr_err("CPU%d update to revision 0x%x failed\n",
842 if (rev
!= prev_rev
) {
843 pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
845 mc
->hdr
.date
& 0xffff,
847 (mc
->hdr
.date
>> 16) & 0xff);
854 uci
->cpu_sig
.rev
= rev
;
857 /* Update boot_cpu_data's revision too, if we're on the BSP: */
858 if (c
->cpu_index
== boot_cpu_data
.cpu_index
)
859 boot_cpu_data
.microcode
= rev
;
864 static enum ucode_state
generic_load_microcode(int cpu
, void *data
, size_t size
,
865 int (*get_ucode_data
)(void *, const void *, size_t))
867 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
868 u8
*ucode_ptr
= data
, *new_mc
= NULL
, *mc
= NULL
;
869 int new_rev
= uci
->cpu_sig
.rev
;
870 unsigned int leftover
= size
;
871 unsigned int curr_mc_size
= 0, new_mc_size
= 0;
872 unsigned int csig
, cpf
;
873 enum ucode_state ret
= UCODE_OK
;
876 struct microcode_header_intel mc_header
;
877 unsigned int mc_size
;
879 if (leftover
< sizeof(mc_header
)) {
880 pr_err("error! Truncated header in microcode data file\n");
884 if (get_ucode_data(&mc_header
, ucode_ptr
, sizeof(mc_header
)))
887 mc_size
= get_totalsize(&mc_header
);
888 if (!mc_size
|| mc_size
> leftover
) {
889 pr_err("error! Bad data in microcode data file\n");
893 /* For performance reasons, reuse mc area when possible */
894 if (!mc
|| mc_size
> curr_mc_size
) {
896 mc
= vmalloc(mc_size
);
899 curr_mc_size
= mc_size
;
902 if (get_ucode_data(mc
, ucode_ptr
, mc_size
) ||
903 microcode_sanity_check(mc
, 1) < 0) {
907 csig
= uci
->cpu_sig
.sig
;
908 cpf
= uci
->cpu_sig
.pf
;
909 if (has_newer_microcode(mc
, csig
, cpf
, new_rev
)) {
911 new_rev
= mc_header
.rev
;
913 new_mc_size
= mc_size
;
914 mc
= NULL
; /* trigger new vmalloc */
918 ucode_ptr
+= mc_size
;
933 uci
->mc
= (struct microcode_intel
*)new_mc
;
936 * If early loading microcode is supported, save this mc into
937 * permanent memory. So it will be loaded early when a CPU is hot added
940 save_mc_for_early(new_mc
, new_mc_size
);
942 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
943 cpu
, new_rev
, uci
->cpu_sig
.rev
);
/* Copy callback for firmware-backed blobs: a plain kernel-memory memcpy. */
static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}
954 static bool is_blacklisted(unsigned int cpu
)
956 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
959 * Late loading on model 79 with microcode revision less than 0x0b000021
960 * and LLC size per core bigger than 2.5MB may result in a system hang.
961 * This behavior is documented in item BDF90, #334165 (Intel Xeon
962 * Processor E7-8800/4800 v4 Product Family).
965 c
->x86_model
== INTEL_FAM6_BROADWELL_X
&&
966 c
->x86_stepping
== 0x01 &&
967 llc_size_per_core
> 2621440 &&
968 c
->microcode
< 0x0b000021) {
969 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c
->microcode
);
970 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
977 static enum ucode_state
request_microcode_fw(int cpu
, struct device
*device
,
981 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
982 const struct firmware
*firmware
;
983 enum ucode_state ret
;
985 if (is_blacklisted(cpu
))
988 sprintf(name
, "intel-ucode/%02x-%02x-%02x",
989 c
->x86
, c
->x86_model
, c
->x86_stepping
);
991 if (request_firmware_direct(&firmware
, name
, device
)) {
992 pr_debug("data file %s load failed\n", name
);
996 ret
= generic_load_microcode(cpu
, (void *)firmware
->data
,
997 firmware
->size
, &get_ucode_fw
);
999 release_firmware(firmware
);
1004 static int get_ucode_user(void *to
, const void *from
, size_t n
)
1006 return copy_from_user(to
, from
, n
);
1009 static enum ucode_state
1010 request_microcode_user(int cpu
, const void __user
*buf
, size_t size
)
1012 if (is_blacklisted(cpu
))
1013 return UCODE_NFOUND
;
1015 return generic_load_microcode(cpu
, (void *)buf
, size
, &get_ucode_user
);
1018 static struct microcode_ops microcode_intel_ops
= {
1019 .request_microcode_user
= request_microcode_user
,
1020 .request_microcode_fw
= request_microcode_fw
,
1021 .collect_cpu_info
= collect_cpu_info
,
1022 .apply_microcode
= apply_microcode_intel
,
1025 static int __init
calc_llc_size_per_core(struct cpuinfo_x86
*c
)
1027 u64 llc_size
= c
->x86_cache_size
* 1024ULL;
1029 do_div(llc_size
, c
->x86_max_cores
);
1031 return (int)llc_size
;
1034 struct microcode_ops
* __init
init_intel_microcode(void)
1036 struct cpuinfo_x86
*c
= &boot_cpu_data
;
1038 if (c
->x86_vendor
!= X86_VENDOR_INTEL
|| c
->x86
< 6 ||
1039 cpu_has(c
, X86_FEATURE_IA64
)) {
1040 pr_err("Intel CPU family 0x%x not supported\n", c
->x86
);
1044 llc_size_per_core
= calc_llc_size_per_core(c
);
1046 return µcode_intel_ops
;