/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		 2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;

static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Processor flags are either both 0 ... */
	if (!p1 && !p2)
		return true;

	/* ... or they intersect. */
	return p1 & p2;
}
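
/*
 * The pf fields are platform bitmasks: the CPU's pf is a single bit
 * (1 << platform ID, see collect_cpu_info_early() below), while a microcode
 * header may set several bits.  For example, a CPU pf of 0x02 matches a blob
 * whose pf is 0x36, because 0x02 & 0x36 is non-zero and
 * cpu_signatures_match() reports a match.
 */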

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}

/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
 *
 * %true  - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	fam   = x86_family(sig);
	model = x86_model(sig);

	fam_ucode   = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}
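
/*
 * The signature compared above is the raw CPUID(1).EAX value.  For example,
 * sig 0x000406f1 decodes to family 0x6, model 0x4f (extended model 0x4,
 * base model 0xf) and stepping 0x1, i.e. a Broadwell-X part; x86_family()
 * and x86_model() fold the extended fields in exactly this way.
 */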

static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
	struct ucode_patch *p;

	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
	if (!p)
		return NULL;

	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
		return NULL;
	}

	return p;
}

static void save_microcode_patch(void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig	     = mc_saved_hdr->sig;
		pf	     = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else
				list_replace(&iter->plist, &p->plist);
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	if (!p)
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}

static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE) ||
		    ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size || mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}

		if (!patch) {
			if (!has_newer_microcode(data, uci->cpu_sig.sig,
						 uci->cpu_sig.pf, uci->cpu_sig.rev))
				goto next;
		} else {
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data, phdr->sig, phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;
next:
		data += mc_size;
	}

	if (size)
		return NULL;

	return patch;
}

static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}
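
/*
 * MSR 0x17 is IA32_PLATFORM_ID and bits 52:50 hold the 3-bit platform ID.
 * val[1] is the upper half of the MSR, so (val[1] >> 18) & 7 extracts those
 * bits and csig.pf becomes a one-bit mask, e.g. platform ID 3 -> pf 0x08.
 */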

static void show_saved_mc(void)
{
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig	= uci.cpu_sig.sig;
	pf	= uci.cpu_sig.pf;
	rev	= uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;

		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
}

/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 */
static void save_mc_for_early(u8 *mc, unsigned int size)
{
#ifdef CONFIG_HOTPLUG_CPU
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
#endif
}

static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	return get_builtin_firmware(cp, name);
}
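
/*
 * The built-in firmware name mirrors the CPUID signature, e.g. a family 6,
 * model 0x4e, stepping 3 part makes load_builtin_intel_microcode() look for
 * "intel-ucode/06-4e-03".
 */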

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}
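
/*
 * The header date is packed as 0xMMDDYYYY with hex-coded decimal fields:
 * bits 31:24 month, 23:16 day, 15:0 year.  E.g. 0x07112017 prints as
 * "date = 2017-07-11".
 */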

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif

static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}
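
/*
 * MSR 0x79 is IA32_BIOS_UPDT_TRIG: writing the linear address of the update
 * data (mc->bits, i.e. the bytes following the 48-byte header) triggers the
 * load.  Whether it took effect is confirmed by re-reading the running
 * revision via intel_get_microcode_revision(), which uses CPUID(1) plus
 * IA32_BIOS_SIGN_ID (MSR 0x8B).
 */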

int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}

/*
 * @res_patch, output: a pointer to the patch we found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		path   = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		path   = ucode_path;
		use_pa = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}

void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}

void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}

static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {

		phdr = (struct microcode_header_intel *)iter->data;

		if (phdr->rev <= uci->cpu_sig.rev)
			continue;

		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;

		return iter->data;
	}
	return NULL;
}

void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}

static int apply_microcode_intel(int cpu)
{
	struct microcode_intel *mc;
	struct ucode_cpu_info *uci;
	struct cpuinfo_x86 *c;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return -1;

	uci = ucode_cpu_info + cpu;
	mc = uci->mc;
	if (!mc) {
		/* Look for a newer patch in our cache: */
		mc = find_patch(uci);
		if (!mc)
			return 0;
	}

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return -1;
	}

	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	c = &cpu_data(cpu);

	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	return 0;
}

static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	unsigned int csig, cpf;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
		}

		ucode_ptr += mc_size;
		leftover  -= mc_size;
	}

	vfree(mc);

	if (leftover) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return UCODE_OK;
}
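
/*
 * A microcode data file may concatenate several updates back to back; each
 * one announces its own length in its totalsize field, which is how the loop
 * in generic_load_microcode() walks from one header to the next and keeps
 * only the newest update that matches this CPU.
 */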

static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_mask == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}

static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	char name[30];
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	enum ucode_state ret;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_mask);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);

	release_firmware(firmware);

	return ret;
}

static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user	= request_microcode_user,
	.request_microcode_fw	= request_microcode_fw,
	.collect_cpu_info	= collect_cpu_info,
	.apply_microcode	= apply_microcode_intel,
};

static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}
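
/*
 * x86_cache_size is in KB.  For example, an 18-core part with a 45MB
 * (46080 KB) LLC yields 46080 * 1024 / 18 = 2621440 bytes per core, i.e.
 * exactly the 2.5MB figure used in is_blacklisted().
 */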

struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	llc_size_per_core = calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}