/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/microcode_amd.h>
static bool ucode_loaded;
static u32 ucode_new_rev;
static unsigned long ucode_offset;
static size_t ucode_size;
/*
 * Microcode patch container file is prepended to the initrd in cpio format.
 * See Documentation/x86/early-microcode.txt
 */
static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
static struct cpio_data __init find_ucode_in_initrd(void)
{
        long offset = 0;
        char *path;
        void *start;
        size_t size;
        unsigned long *uoffset;
        size_t *usize;
        struct cpio_data cd;

#ifdef CONFIG_X86_32
        struct boot_params *p;
        /*
         * On 32-bit, early load occurs before paging is turned on so we need
         * to use physical addresses.
         */
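        /*
         * (__pa_nodebug() is the flavor of __pa() without the
         * CONFIG_DEBUG_VIRTUAL sanity checks, which cannot be relied on
         * this early in boot.)
         */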
        p       = (struct boot_params *)__pa_nodebug(&boot_params);
        path    = (char *)__pa_nodebug(ucode_path);
        start   = (void *)p->hdr.ramdisk_image;
        size    = p->hdr.ramdisk_size;
        uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
        usize   = (size_t *)__pa_nodebug(&ucode_size);
#else
        path    = ucode_path;
        start   = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
        size    = boot_params.hdr.ramdisk_size;
        uoffset = &ucode_offset;
        usize   = &ucode_size;
#endif
        cd = find_cpio_data(path, start, size, &offset);
        if (!cd.data)
                return cd;

        if (*(u32 *)cd.data != UCODE_MAGIC) {
                cd.data = NULL;
                cd.size = 0;
                return cd;
        }

        *uoffset = (u8 *)cd.data - (u8 *)start;
        *usize   = cd.size;

        return cd;
}
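/*
 * For reference, a sketch of the "newc" cpio header that find_cpio_data()
 * scans for when locating ucode_path. This type is purely illustrative
 * (nothing here uses it): every field is ASCII hex, and the filename and
 * file data that follow the header are each padded to a 4-byte boundary.
 */
struct cpio_newc_header_sketch {
        char c_magic[6];                /* "070701" */
        char c_ino[8];
        char c_mode[8];
        char c_uid[8];
        char c_gid[8];
        char c_nlink[8];
        char c_mtime[8];
        char c_filesize[8];             /* length of the file data */
        char c_devmajor[8];
        char c_devminor[8];
        char c_rdevmajor[8];
        char c_rdevminor[8];
        char c_namesize[8];             /* filename length, incl. NUL */
        char c_check[8];
};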
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size)
{
        struct equiv_cpu_entry *eq;
        size_t *usize;
        u8 *data;
        u16 eq_id = 0;
        int offset, left;
        u32 *header;
        u32 rev, eax;
        u32 *new_rev;
        unsigned long *uoffset;
#ifdef CONFIG_X86_32
        new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
        uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
        usize   = (size_t *)__pa_nodebug(&ucode_size);
#else
        new_rev = &ucode_new_rev;
        uoffset = &ucode_offset;
        usize   = &ucode_size;
#endif
        data   = ucode;
        left   = size;
        header = (u32 *)data;

        /* find equiv cpu table */
        if (header[0] != UCODE_MAGIC ||
            header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
            header[2] == 0)                            /* size */
                return;

        eax = cpuid_eax(0x00000001);

        while (left > 0) {
                eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

                offset = header[2] + CONTAINER_HDR_SZ;
                data  += offset;
                left  -= offset;

                eq_id = find_equiv_id(eq, eax);
                if (eq_id)
                        break;
                /*
                 * Support multiple container files appended together. If this
                 * one does not have a matching equivalent cpu entry, we fast
                 * forward to the next container file.
                 */
                while (left > 0) {
                        header = (u32 *)data;
                        if (header[0] == UCODE_MAGIC &&
                            header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
                                break;

                        offset = header[1] + SECTION_HDR_SIZE;
                        data  += offset;
                        left  -= offset;
                }
                /* mark where the next microcode container file starts */
                offset    = data - (u8 *)ucode;
                *uoffset += offset;
                *usize   -= offset;
                ucode     = data;
        }

        if (!eq_id) {
                *usize = 0;
                return;
        }
        /* find ucode and update if needed */

        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

        while (left > 0) {
                struct microcode_amd *mc;

                header = (u32 *)data;
                if (header[0] != UCODE_UCODE_TYPE || /* type */
                    header[1] == 0)                  /* size */
                        break;
                mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
                if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id)
                        if (__apply_microcode_amd(mc) == 0) {
                                rev = mc->hdr.patch_id;
                                *new_rev = rev;
                        }
                offset  = header[1] + SECTION_HDR_SIZE;
                data   += offset;
                left   -= offset;
        }

        /* mark where this microcode container file ends */
        offset  = *usize - (data - (u8 *)ucode);
        *usize -= offset;

        if (!*new_rev)
                *usize = 0;
}
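/*
 * For reference, the container layout apply_ucode_in_initrd() walks
 * (a sketch; the sizes come from the headers themselves):
 *
 *   u32 UCODE_MAGIC                     \
 *   u32 UCODE_EQUIV_CPU_TABLE_TYPE       } CONTAINER_HDR_SZ
 *   u32 <equiv table size in bytes>     /
 *   struct equiv_cpu_entry[]            -- terminated by a zeroed entry
 *   u32 UCODE_UCODE_TYPE                \
 *   u32 <patch size in bytes>            } SECTION_HDR_SIZE + patch,
 *   struct microcode_amd <patch>        /  repeated per patch
 *   ... optionally another whole container appended ...
 */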
void __init load_ucode_amd_bsp(void)
{
        struct cpio_data cd = find_ucode_in_initrd();

        if (!cd.data)
                return;

        apply_ucode_in_initrd(cd.data, cd.size);
}
#ifdef CONFIG_X86_32
u8 amd_bsp_mpb[MPB_MAX_SIZE];
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
 * is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
        struct microcode_amd *mc;
        unsigned long *initrd;
        unsigned long *uoffset;
        size_t *usize;
        void *ucode;

        mc = (struct microcode_amd *)__pa(amd_bsp_mpb);
        if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
                __apply_microcode_amd(mc);
                return;
        }
        initrd  = (unsigned long *)__pa(&initrd_start);
        uoffset = (unsigned long *)__pa(&ucode_offset);
        usize   = (size_t *)__pa(&ucode_size);
        if (!*usize || !*initrd)
                return;

        ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset);
        apply_ucode_in_initrd(ucode, *usize);
}
static void __init collect_cpu_sig_on_bsp(void *arg)
{
        unsigned int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
#else
void load_ucode_amd_ap(void)
{
        unsigned int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        u32 rev, eax;
        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
        eax = cpuid_eax(0x00000001);

        uci->cpu_sig.rev = rev;
        uci->cpu_sig.sig = eax;
        if (cpu && !ucode_loaded) {
                void *ucode;
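                /*
                 * The first AP to get here loads the whole container into
                 * the kernel-heap patch cache -- vmalloc() works by this
                 * point (see the comment above apply_ucode_in_initrd()) --
                 * so every later CPU can apply straight from that cache.
                 */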
                if (!ucode_size || !initrd_start)
                        return;

                ucode = (void *)(initrd_start + ucode_offset);
                eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
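                /*
                 * eax now holds the CPU family: base family (CPUID
                 * Fn0000_0001 EAX bits 11:8) plus extended family (bits
                 * 27:20). E.g. eax = 0x00600f12 gives 0xf + 0x6 = 0x15.
                 */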
                if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
                        return;

                ucode_loaded = true;
        }

        apply_microcode_amd(cpu);
}
#endif
int __init save_microcode_in_initrd_amd(void)
{
        enum ucode_state ret;
        void *ucode;
        u32 eax;

#ifdef CONFIG_X86_32
        unsigned int bsp = boot_cpu_data.cpu_index;
        struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
        if (!uci->cpu_sig.sig)
                smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
#endif
        if (ucode_new_rev)
                pr_info("microcode: updated early to new patch_level=0x%08x\n",
                        ucode_new_rev);
        if (ucode_loaded || !ucode_size || !initrd_start)
                return 0;

        ucode = (void *)(initrd_start + ucode_offset);
        eax   = cpuid_eax(0x00000001);
        eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

        ret = load_microcode_amd(eax, ucode, ucode_size);
        if (ret != UCODE_OK)
                return -EINVAL;

        ucode_loaded = true;

        return 0;
}