/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/microcode_amd.h>
/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;

static u32 ucode_new_rev;
u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;

static struct cpio_data ucode_cpio;
/*
 * Microcode patch container file is prepended to the initrd in cpio format.
 * See Documentation/x86/early-microcode.txt
 */
static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
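/*
 * As the parsing code below expects it, a container blob looks roughly like
 * this:
 *
 *   container header:  UCODE_MAGIC, UCODE_EQUIV_CPU_TABLE_TYPE, table size
 *   equivalence table: array of struct equiv_cpu_entry
 *   patch sections:    UCODE_UCODE_TYPE, patch size,
 *                      struct microcode_amd payload (one or more)
 *
 * Several such containers may be appended together in the cpio member.
 */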
static struct cpio_data __init find_ucode_in_initrd(void)
{
	long offset = 0;
	char *path;
	void *start;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit, early load occurs before paging is turned on so we need
	 * to use physical addresses.
	 */
	p       = (struct boot_params *)__pa_nodebug(&boot_params);
	path    = (char *)__pa_nodebug(ucode_path);
	start   = (void *)p->hdr.ramdisk_image;
	size    = p->hdr.ramdisk_size;
#else
	path    = ucode_path;
	start   = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size    = boot_params.hdr.ramdisk_size;
#endif

	return find_cpio_data(path, start, size, &offset);
}
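/*
 * compute_container_size() validates the container header and then walks the
 * patch sections, returning the size in bytes of this one container -- header,
 * equivalence table and all patch sections -- or 0 if the header is not valid.
 */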
static size_t compute_container_size(u8 *data, u32 total_size)
{
	size_t size = 0;
	u32 *header = (u32 *)data;

	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return size;

	size = header[2] + CONTAINER_HDR_SZ;
	total_size -= size;
	data += size;

	while (total_size) {
		u16 patch_size;

		header = (u32 *)data;

		if (header[0] != UCODE_UCODE_TYPE)
			break;

		/*
		 * Sanity-check patch size.
		 */
		patch_size = header[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		size	   += patch_size + SECTION_HDR_SIZE;
		data	   += patch_size + SECTION_HDR_SIZE;
		total_size -= patch_size + SECTION_HDR_SIZE;
	}

	return size;
}
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8  *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont	= (u8 **)__pa_nodebug(&container);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont	= &container;
	patch	= &amd_ucode_patch;
#endif

	data   = ucode;
	left   = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return;

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data  += offset;
		left  -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data  += offset;
			left  -= offset;
		}

		/* mark where the next microcode container file starts */
		offset = data - (u8 *)ucode;
		ucode  = data;
	}

	if (!eq_id) {
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	/* find ucode and update if needed */

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset  = header[1] + SECTION_HDR_SIZE;
		data   += offset;
		left   -= offset;
	}
}
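/*
 * BSP early load: find the container in the initrd cpio, remember it in
 * ucode_cpio and apply a matching patch in place. Passing save_patch == true
 * also stashes the applied patch in amd_ucode_patch for later re-application.
 */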
void __init load_ucode_amd_bsp(void)
{
	struct cpio_data cp;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	data = (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
#endif

	cp = find_ucode_in_initrd();
	if (!cp.data)
		return;

	*data = cp.data;
	*size = cp.size;

	apply_ucode_in_initrd(cp.data, cp.size, true);
}
#ifdef CONFIG_X86_32
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
 * which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}
static void __init collect_cpu_sig_on_bsp(void *arg)
{
	unsigned int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
static void __init get_bsp_sig(void)
{
	unsigned int bsp = boot_cpu_data.cpu_index;
	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;

	if (!uci->cpu_sig.sig)
		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
}
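/*
 * The two helpers above are 32-bit only: they make sure the BSP's signature
 * is recorded in ucode_cpu_info before save_microcode_in_initrd_amd() below
 * moves the patches into the kernel heap cache.
 */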
#else
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	if (!container)
		return;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

	uci->cpu_sig.rev = rev;
	uci->cpu_sig.sig = eax;

	eax = cpuid_eax(0x00000001);
	eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
#endif
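/*
 * Called when the initrd is about to be jettisoned: hand the container to
 * load_microcode_amd() so the equivalence table and patches end up in kernel
 * heap memory, then drop our references into the initrd.
 */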
int __init save_microcode_in_initrd_amd(void)
{
	unsigned long cont;
	int retval = 0;
	enum ucode_state ret;
	u8 *cont_va;
	u32 eax;

	if (!container)
		return -EINVAL;

#ifdef CONFIG_X86_32
	get_bsp_sig();
	cont	= (unsigned long)container;
	cont_va	= __va(container);
#else
	/*
	 * We need the physical address of the container for both bitness since
	 * boot_params.hdr.ramdisk_image is a physical address.
	 */
	cont    = __pa(container);
	cont_va = container;
#endif

	/*
	 * Take into account the fact that the ramdisk might get relocated and
	 * therefore we need to recompute the container's position in virtual
	 * memory space.
	 */
	if (relocated_ramdisk)
		container = (u8 *)(__va(relocated_ramdisk) +
			     (cont - boot_params.hdr.ramdisk_image));
	else
		container = cont_va;

	if (ucode_new_rev)
		pr_info("microcode: updated early to new patch_level=0x%08x\n",
			ucode_new_rev);

	eax   = cpuid_eax(0x00000001);
	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * This will be freed any msec now, stash patches for the current
	 * family and switch to patch cache for cpu hotplug, etc later.
	 */
	container = NULL;
	container_size = 0;

	return retval;
}
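/*
 * Re-apply the patch stashed in amd_ucode_patch, e.g. when microcode needs to
 * be reloaded on resume from suspend.
 */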
void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev, eax;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

	mc = (struct microcode_amd *)amd_ucode_patch;

	if (mc && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("microcode: reload patch_level=0x%08x\n",
				ucode_new_rev);
		}
	}
}