4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
29 #include <sys/asm_linkage.h>
30 #include <sys/bootconf.h>
31 #include <sys/cpuvar.h>
32 #include <sys/cmn_err.h>
33 #include <sys/controlregs.h>
34 #include <sys/debug.h>
36 #include <sys/kobj_impl.h>
37 #include <sys/machsystm.h>
38 #include <sys/ontrap.h>
39 #include <sys/param.h>
40 #include <sys/machparam.h>
41 #include <sys/promif.h>
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/types.h>
45 #include <sys/thread.h>
46 #include <sys/ucode.h>
47 #include <sys/x86_archext.h>
48 #include <sys/x_call.h>
50 #include <sys/hypervisor.h>
54 * AMD-specific equivalence table
56 static ucode_eqtbl_amd_t
*ucode_eqtbl_amd
;
59 * mcpu_ucode_info for the boot CPU. Statically allocated.
61 static struct cpu_ucode_info cpu_ucode_info0
;
63 static ucode_file_t ucodefile
;
65 static void* ucode_zalloc(processorid_t
, size_t);
66 static void ucode_free(processorid_t
, void *, size_t);
68 static int ucode_capable_amd(cpu_t
*);
69 static int ucode_capable_intel(cpu_t
*);
71 static ucode_errno_t
ucode_extract_amd(ucode_update_t
*, uint8_t *, int);
72 static ucode_errno_t
ucode_extract_intel(ucode_update_t
*, uint8_t *,
75 static void ucode_file_reset_amd(ucode_file_t
*, processorid_t
);
76 static void ucode_file_reset_intel(ucode_file_t
*, processorid_t
);
78 static uint32_t ucode_load_amd(ucode_file_t
*, cpu_ucode_info_t
*, cpu_t
*);
79 static uint32_t ucode_load_intel(ucode_file_t
*, cpu_ucode_info_t
*, cpu_t
*);
82 static void ucode_load_xpv(ucode_update_t
*);
83 static void ucode_chipset_amd(uint8_t *, int);
86 static int ucode_equiv_cpu_amd(cpu_t
*, uint16_t *);
88 static ucode_errno_t
ucode_locate_amd(cpu_t
*, cpu_ucode_info_t
*,
90 static ucode_errno_t
ucode_locate_intel(cpu_t
*, cpu_ucode_info_t
*,
94 static ucode_errno_t
ucode_match_amd(uint16_t, cpu_ucode_info_t
*,
95 ucode_file_amd_t
*, int);
97 static ucode_errno_t
ucode_match_intel(int, cpu_ucode_info_t
*,
98 ucode_header_intel_t
*, ucode_ext_table_intel_t
*);
100 static void ucode_read_rev_amd(cpu_ucode_info_t
*);
101 static void ucode_read_rev_intel(cpu_ucode_info_t
*);
103 static const struct ucode_ops ucode_amd
= {
106 ucode_file_reset_amd
,
114 static const struct ucode_ops ucode_intel
= {
115 MSR_INTC_UCODE_WRITE
,
117 ucode_file_reset_intel
,
118 ucode_read_rev_intel
,
120 ucode_validate_intel
,
/* Active vendor ops vector; set in ucode_check(), NULL until then. */
const struct ucode_ops *ucode;

static const char ucode_failure_fmt[] =
	"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
	"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag. If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version. Should only be used for debugging.
 */
int ucode_force_update = 0;
140 * Allocate space for mcpu_ucode_info in the machcpu structure
141 * for all non-boot CPUs.
144 ucode_alloc_space(cpu_t
*cp
)
146 ASSERT(cp
->cpu_id
!= 0);
147 ASSERT(cp
->cpu_m
.mcpu_ucode_info
== NULL
);
148 cp
->cpu_m
.mcpu_ucode_info
=
149 kmem_zalloc(sizeof (*cp
->cpu_m
.mcpu_ucode_info
), KM_SLEEP
);
153 ucode_free_space(cpu_t
*cp
)
155 ASSERT(cp
->cpu_m
.mcpu_ucode_info
!= NULL
);
156 ASSERT(cp
->cpu_m
.mcpu_ucode_info
!= &cpu_ucode_info0
);
157 kmem_free(cp
->cpu_m
.mcpu_ucode_info
,
158 sizeof (*cp
->cpu_m
.mcpu_ucode_info
));
159 cp
->cpu_m
.mcpu_ucode_info
= NULL
;
163 * Called when we are done with microcode update on all processors to free up
164 * space allocated for the microcode file.
172 ucode
->file_reset(&ucodefile
, -1);
176 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
177 * allocated with BOP_ALLOC() and does not require a free.
180 ucode_zalloc(processorid_t id
, size_t size
)
183 return (kmem_zalloc(size
, KM_NOSLEEP
));
185 /* BOP_ALLOC() failure results in panic */
186 return (BOP_ALLOC(bootops
, NULL
, size
, MMU_PAGESIZE
));
190 ucode_free(processorid_t id
, void* buf
, size_t size
)
193 kmem_free(buf
, size
);
197 * Check whether or not a processor is capable of microcode operations
198 * Returns 1 if it is capable, 0 if not.
200 * At this point we only support microcode update for:
201 * - Intel processors family 6 and above, and
202 * - AMD processors family 0x10 and above.
204 * We also assume that we don't support a mix of Intel and
205 * AMD processors in the same box.
207 * An i86xpv guest domain or VM can't update the microcode.
210 #define XPVDOMU_OR_HVM \
211 ((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)
215 ucode_capable_amd(cpu_t
*cp
)
217 int hwenv
= get_hwenv();
222 return (cpuid_getfamily(cp
) >= 0x10);
226 ucode_capable_intel(cpu_t
*cp
)
228 int hwenv
= get_hwenv();
233 return (cpuid_getfamily(cp
) >= 6);
237 * Called when it is no longer necessary to keep the microcode around,
238 * or when the cached microcode doesn't match the CPU being processed.
241 ucode_file_reset_amd(ucode_file_t
*ufp
, processorid_t id
)
243 ucode_file_amd_t
*ucodefp
= ufp
->amd
;
248 ucode_free(id
, ucodefp
, sizeof (ucode_file_amd_t
));
253 ucode_file_reset_intel(ucode_file_t
*ufp
, processorid_t id
)
255 ucode_file_intel_t
*ucodefp
= &ufp
->intel
;
256 int total_size
, body_size
;
258 if (ucodefp
== NULL
|| ucodefp
->uf_header
== NULL
)
261 total_size
= UCODE_TOTAL_SIZE_INTEL(ucodefp
->uf_header
->uh_total_size
);
262 body_size
= UCODE_BODY_SIZE_INTEL(ucodefp
->uf_header
->uh_body_size
);
263 if (ucodefp
->uf_body
) {
264 ucode_free(id
, ucodefp
->uf_body
, body_size
);
265 ucodefp
->uf_body
= NULL
;
268 if (ucodefp
->uf_ext_table
) {
269 int size
= total_size
- body_size
- UCODE_HEADER_SIZE_INTEL
;
271 ucode_free(id
, ucodefp
->uf_ext_table
, size
);
272 ucodefp
->uf_ext_table
= NULL
;
275 ucode_free(id
, ucodefp
->uf_header
, UCODE_HEADER_SIZE_INTEL
);
276 ucodefp
->uf_header
= NULL
;
280 * Find the equivalent CPU id in the equivalence table.
283 ucode_equiv_cpu_amd(cpu_t
*cp
, uint16_t *eq_sig
)
285 char name
[MAXPATHLEN
];
288 int offset
= 0, cpi_sig
= cpuid_getsig(cp
);
289 ucode_eqtbl_amd_t
*eqtbl
= ucode_eqtbl_amd
;
291 (void) snprintf(name
, MAXPATHLEN
, "/%s/%s/equivalence-table",
292 UCODE_INSTALL_PATH
, cpuid_getvendorstr(cp
));
295 * No kmem_zalloc() etc. available on boot cpu.
297 if (cp
->cpu_id
== 0) {
298 if ((fd
= kobj_open(name
)) == -1)
299 return (EM_OPENFILE
);
300 /* ucode_zalloc() cannot fail on boot cpu */
301 eqtbl
= ucode_zalloc(cp
->cpu_id
, sizeof (*eqtbl
));
304 count
= kobj_read(fd
, (int8_t *)eqtbl
,
305 sizeof (*eqtbl
), offset
);
306 if (count
!= sizeof (*eqtbl
)) {
307 (void) kobj_close(fd
);
308 return (EM_HIGHERREV
);
311 } while (eqtbl
->ue_inst_cpu
&& eqtbl
->ue_inst_cpu
!= cpi_sig
);
312 (void) kobj_close(fd
);
316 * If not already done, load the equivalence table.
317 * Not done on boot CPU.
323 if ((eq
= kobj_open_file(name
)) == (struct _buf
*)-1)
324 return (EM_OPENFILE
);
326 if (kobj_get_filesize(eq
, &size
) < 0) {
328 return (EM_OPENFILE
);
331 ucode_eqtbl_amd
= kmem_zalloc(size
, KM_NOSLEEP
);
332 if (ucode_eqtbl_amd
== NULL
) {
337 count
= kobj_read_file(eq
, (char *)ucode_eqtbl_amd
, size
, 0);
341 return (EM_FILESIZE
);
344 /* Get the equivalent CPU id. */
346 for (eqtbl
= ucode_eqtbl_amd
;
347 eqtbl
->ue_inst_cpu
&& eqtbl
->ue_inst_cpu
!= cpi_sig
;
351 *eq_sig
= eqtbl
->ue_equiv_cpu
;
353 /* No equivalent CPU id found, assume outdated microcode file. */
355 return (EM_HIGHERREV
);
361 * xVM cannot check for the presence of PCI devices. Look for chipset-
362 * specific microcode patches in the container file and disable them
363 * by setting their CPU revision to an invalid value.
367 ucode_chipset_amd(uint8_t *buf
, int size
)
369 ucode_header_amd_t
*uh
;
370 uint32_t *ptr
= (uint32_t *)buf
;
373 /* skip to first microcode patch */
374 ptr
+= 2; len
= *ptr
++; ptr
+= len
>> 2; size
-= len
;
376 while (size
>= sizeof (ucode_header_amd_t
) + 8) {
378 uh
= (ucode_header_amd_t
*)ptr
;
379 ptr
+= len
>> 2; size
-= len
;
382 cmn_err(CE_WARN
, "ignoring northbridge-specific ucode: "
383 "chipset id %x, revision %x",
384 uh
->uh_nb_id
, uh
->uh_nb_rev
);
385 uh
->uh_cpu_rev
= 0xffff;
389 cmn_err(CE_WARN
, "ignoring southbridge-specific ucode: "
390 "chipset id %x, revision %x",
391 uh
->uh_sb_id
, uh
->uh_sb_rev
);
392 uh
->uh_cpu_rev
= 0xffff;
399 * Populate the ucode file structure from microcode file corresponding to
400 * this CPU, if exists.
402 * Return EM_OK on success, corresponding error code on failure.
406 ucode_locate_amd(cpu_t
*cp
, cpu_ucode_info_t
*uinfop
, ucode_file_t
*ufp
)
408 char name
[MAXPATHLEN
];
411 ucode_file_amd_t
*ucodefp
= ufp
->amd
;
417 /* get equivalent CPU id */
418 if ((rc
= ucode_equiv_cpu_amd(cp
, &eq_sig
)) != EM_OK
)
422 * Allocate a buffer for the microcode patch. If the buffer has been
423 * allocated before, check for a matching microcode to avoid loading
427 ucodefp
= ucode_zalloc(cp
->cpu_id
, sizeof (*ucodefp
));
428 else if (ucode_match_amd(eq_sig
, uinfop
, ucodefp
, sizeof (*ucodefp
))
438 * Find the patch for this CPU. The patch files are named XXXX-YY, where
439 * XXXX is the equivalent CPU id and YY is the running patch number.
440 * Patches specific to certain chipsets are guaranteed to have lower
441 * numbers than less specific patches, so we can just load the first
442 * patch that matches.
445 for (i
= 0; i
< 0xff; i
++) {
446 (void) snprintf(name
, MAXPATHLEN
, "/%s/%s/%04X-%02X",
447 UCODE_INSTALL_PATH
, cpuid_getvendorstr(cp
), eq_sig
, i
);
448 if ((fd
= kobj_open(name
)) == -1)
450 count
= kobj_read(fd
, (char *)ucodefp
, sizeof (*ucodefp
), 0);
451 (void) kobj_close(fd
);
453 if (ucode_match_amd(eq_sig
, uinfop
, ucodefp
, count
) == EM_OK
)
462 * The xVM case is special. To support mixed-revision systems, the
463 * hypervisor will choose which patch to load for which CPU, so the
464 * whole microcode patch container file will have to be loaded.
466 * Since this code is only run on the boot cpu, we don't have to care
467 * about failing ucode_zalloc() or freeing allocated memory.
470 return (EM_INVALIDARG
);
472 (void) snprintf(name
, MAXPATHLEN
, "/%s/%s/container",
473 UCODE_INSTALL_PATH
, cpuid_getvendorstr(cp
));
475 if ((fd
= kobj_open(name
)) == -1)
476 return (EM_OPENFILE
);
478 /* get the file size by counting bytes */
480 count
= kobj_read(fd
, &c
, 1, size
);
484 ucodefp
= ucode_zalloc(cp
->cpu_id
, sizeof (*ucodefp
));
488 ucodefp
->usize
= size
;
489 ucodefp
->ucodep
= ucode_zalloc(cp
->cpu_id
, size
);
490 ASSERT(ucodefp
->ucodep
);
492 /* load the microcode patch container file */
493 count
= kobj_read(fd
, (char *)ucodefp
->ucodep
, size
, 0);
494 (void) kobj_close(fd
);
497 return (EM_FILESIZE
);
499 /* make sure the container file is valid */
500 rc
= ucode
->validate(ucodefp
->ucodep
, ucodefp
->usize
);
505 /* disable chipset-specific patches */
506 ucode_chipset_amd(ucodefp
->ucodep
, ucodefp
->usize
);
513 ucode_locate_intel(cpu_t
*cp
, cpu_ucode_info_t
*uinfop
, ucode_file_t
*ufp
)
515 char name
[MAXPATHLEN
];
518 int header_size
= UCODE_HEADER_SIZE_INTEL
;
519 int cpi_sig
= cpuid_getsig(cp
);
520 ucode_errno_t rc
= EM_OK
;
521 ucode_file_intel_t
*ucodefp
= &ufp
->intel
;
526 * If the microcode matches the CPU we are processing, use it.
528 if (ucode_match_intel(cpi_sig
, uinfop
, ucodefp
->uf_header
,
529 ucodefp
->uf_ext_table
) == EM_OK
&& ucodefp
->uf_body
!= NULL
) {
534 * Look for microcode file with the right name.
536 (void) snprintf(name
, MAXPATHLEN
, "/%s/%s/%08X-%02X",
537 UCODE_INSTALL_PATH
, cpuid_getvendorstr(cp
), cpi_sig
,
539 if ((fd
= kobj_open(name
)) == -1) {
540 return (EM_OPENFILE
);
544 * We found a microcode file for the CPU we are processing,
545 * reset the microcode data structure and read in the new
548 ucode
->file_reset(ufp
, cp
->cpu_id
);
550 ucodefp
->uf_header
= ucode_zalloc(cp
->cpu_id
, header_size
);
551 if (ucodefp
->uf_header
== NULL
)
554 count
= kobj_read(fd
, (char *)ucodefp
->uf_header
, header_size
, 0);
557 case UCODE_HEADER_SIZE_INTEL
: {
559 ucode_header_intel_t
*uhp
= ucodefp
->uf_header
;
560 uint32_t offset
= header_size
;
561 int total_size
, body_size
, ext_size
;
565 * Make sure that the header contains valid fields.
567 if ((rc
= ucode_header_validate_intel(uhp
)) == EM_OK
) {
568 total_size
= UCODE_TOTAL_SIZE_INTEL(uhp
->uh_total_size
);
569 body_size
= UCODE_BODY_SIZE_INTEL(uhp
->uh_body_size
);
570 ucodefp
->uf_body
= ucode_zalloc(cp
->cpu_id
, body_size
);
571 if (ucodefp
->uf_body
== NULL
) {
576 if (kobj_read(fd
, (char *)ucodefp
->uf_body
,
577 body_size
, offset
) != body_size
)
584 sum
= ucode_checksum_intel(0, header_size
,
585 (uint8_t *)ucodefp
->uf_header
);
586 if (ucode_checksum_intel(sum
, body_size
, ucodefp
->uf_body
)) {
592 * Check to see if there is extended signature table.
594 offset
= body_size
+ header_size
;
595 ext_size
= total_size
- offset
;
600 ucodefp
->uf_ext_table
= ucode_zalloc(cp
->cpu_id
, ext_size
);
601 if (ucodefp
->uf_ext_table
== NULL
) {
606 if (kobj_read(fd
, (char *)ucodefp
->uf_ext_table
,
607 ext_size
, offset
) != ext_size
) {
609 } else if (ucode_checksum_intel(0, ext_size
,
610 (uint8_t *)(ucodefp
->uf_ext_table
))) {
615 ext_size
-= UCODE_EXT_TABLE_SIZE_INTEL
;
616 for (i
= 0; i
< ucodefp
->uf_ext_table
->uet_count
;
618 if (ucode_checksum_intel(0,
619 UCODE_EXT_SIG_SIZE_INTEL
,
620 (uint8_t *)(&(ucodefp
->uf_ext_table
->
640 rc
= ucode_match_intel(cpi_sig
, uinfop
, ucodefp
->uf_header
,
641 ucodefp
->uf_ext_table
);
648 ucode_match_amd(uint16_t eq_sig
, cpu_ucode_info_t
*uinfop
,
649 ucode_file_amd_t
*ucodefp
, int size
)
651 ucode_header_amd_t
*uh
;
653 if (ucodefp
== NULL
|| size
< sizeof (ucode_header_amd_t
))
656 uh
= &ucodefp
->uf_header
;
659 * Don't even think about loading patches that would require code
660 * execution. Does not apply to patches for family 0x14 and beyond.
662 if (uh
->uh_cpu_rev
< 0x5000 &&
663 size
> offsetof(ucode_file_amd_t
, uf_code_present
) &&
664 ucodefp
->uf_code_present
)
667 if (eq_sig
!= uh
->uh_cpu_rev
)
671 cmn_err(CE_WARN
, "ignoring northbridge-specific ucode: "
672 "chipset id %x, revision %x", uh
->uh_nb_id
, uh
->uh_nb_rev
);
677 cmn_err(CE_WARN
, "ignoring southbridge-specific ucode: "
678 "chipset id %x, revision %x", uh
->uh_sb_id
, uh
->uh_sb_rev
);
682 if (uh
->uh_patch_id
<= uinfop
->cui_rev
&& !ucode_force_update
)
683 return (EM_HIGHERREV
);
690 * Returns 1 if the microcode is for this processor; 0 otherwise.
693 ucode_match_intel(int cpi_sig
, cpu_ucode_info_t
*uinfop
,
694 ucode_header_intel_t
*uhp
, ucode_ext_table_intel_t
*uetp
)
699 if (UCODE_MATCH_INTEL(cpi_sig
, uhp
->uh_signature
,
700 uinfop
->cui_platid
, uhp
->uh_proc_flags
)) {
702 if (uinfop
->cui_rev
>= uhp
->uh_rev
&& !ucode_force_update
)
703 return (EM_HIGHERREV
);
711 for (i
= 0; i
< uetp
->uet_count
; i
++) {
712 ucode_ext_sig_intel_t
*uesp
;
714 uesp
= &uetp
->uet_ext_sig
[i
];
716 if (UCODE_MATCH_INTEL(cpi_sig
, uesp
->ues_signature
,
717 uinfop
->cui_platid
, uesp
->ues_proc_flags
)) {
719 if (uinfop
->cui_rev
>= uhp
->uh_rev
&&
721 return (EM_HIGHERREV
);
733 ucode_write(xc_arg_t arg1
, xc_arg_t unused2
, xc_arg_t unused3
)
735 ucode_update_t
*uusp
= (ucode_update_t
*)arg1
;
736 cpu_ucode_info_t
*uinfop
= CPU
->cpu_m
.mcpu_ucode_info
;
742 ASSERT(uusp
->ucodep
);
746 * Check one more time to see if it is really necessary to update
747 * microcode just in case this is a hyperthreaded processor where
748 * the threads share the same microcode.
750 if (!ucode_force_update
) {
751 ucode
->read_rev(uinfop
);
752 uusp
->new_rev
= uinfop
->cui_rev
;
753 if (uinfop
->cui_rev
>= uusp
->expected_rev
)
757 if (!on_trap(&otd
, OT_DATA_ACCESS
))
758 wrmsr(ucode
->write_msr
, (uintptr_t)uusp
->ucodep
);
762 ucode
->read_rev(uinfop
);
763 uusp
->new_rev
= uinfop
->cui_rev
;
770 ucode_load_amd(ucode_file_t
*ufp
, cpu_ucode_info_t
*uinfop
, cpu_t
*cp
)
772 ucode_file_amd_t
*ucodefp
= ufp
->amd
;
784 if (on_trap(&otd
, OT_DATA_ACCESS
)) {
789 wrmsr(ucode
->write_msr
, (uintptr_t)ucodefp
);
791 ucode
->read_rev(uinfop
);
794 return (ucodefp
->uf_header
.uh_patch_id
);
796 uus
.ucodep
= ucodefp
->ucodep
;
797 uus
.usize
= ucodefp
->usize
;
798 ucode_load_xpv(&uus
);
799 ucode
->read_rev(uinfop
);
800 uus
.new_rev
= uinfop
->cui_rev
;
802 return (uus
.new_rev
);
808 ucode_load_intel(ucode_file_t
*ufp
, cpu_ucode_info_t
*uinfop
, cpu_t
*cp
)
810 ucode_file_intel_t
*ucodefp
= &ufp
->intel
;
824 * the hypervisor wants the header, data, and extended
825 * signature tables. We can only get here from the boot
826 * CPU (cpu #0), we don't need to free as ucode_zalloc() will
829 usize
= UCODE_TOTAL_SIZE_INTEL(ucodefp
->uf_header
->uh_total_size
);
830 ustart
= ucode_zalloc(cp
->cpu_id
, usize
);
833 body_size
= UCODE_BODY_SIZE_INTEL(ucodefp
->uf_header
->uh_body_size
);
834 ext_offset
= body_size
+ UCODE_HEADER_SIZE_INTEL
;
835 ext_size
= usize
- ext_offset
;
836 ASSERT(ext_size
>= 0);
838 (void) memcpy(ustart
, ucodefp
->uf_header
, UCODE_HEADER_SIZE_INTEL
);
839 (void) memcpy(&ustart
[UCODE_HEADER_SIZE_INTEL
], ucodefp
->uf_body
,
842 (void) memcpy(&ustart
[ext_offset
],
843 ucodefp
->uf_ext_table
, ext_size
);
847 ucode_load_xpv(&uus
);
848 ucode
->read_rev(uinfop
);
849 uus
.new_rev
= uinfop
->cui_rev
;
852 wrmsr(ucode
->write_msr
, (uintptr_t)ucodefp
->uf_body
);
853 ucode
->read_rev(uinfop
);
857 return (ucodefp
->uf_header
->uh_rev
);
863 ucode_load_xpv(ucode_update_t
*uusp
)
865 xen_platform_op_t op
;
868 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info
));
871 op
.cmd
= XENPF_microcode_update
;
872 op
.interface_version
= XENPF_INTERFACE_VERSION
;
873 /*LINTED: constant in conditional context*/
874 set_xen_guest_handle(op
.u
.microcode
.data
, uusp
->ucodep
);
875 op
.u
.microcode
.length
= uusp
->usize
;
876 e
= HYPERVISOR_platform_op(&op
);
878 cmn_err(CE_WARN
, "hypervisor failed to accept uCode update");
885 ucode_read_rev_amd(cpu_ucode_info_t
*uinfop
)
887 uinfop
->cui_rev
= rdmsr(MSR_AMD_PATCHLEVEL
);
891 ucode_read_rev_intel(cpu_ucode_info_t
*uinfop
)
893 struct cpuid_regs crs
;
896 * The Intel 64 and IA-32 Architecture Software Developer's Manual
897 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
898 * execute cpuid to guarantee the correct reading of this register.
900 wrmsr(MSR_INTC_UCODE_REV
, 0);
901 (void) __cpuid_insn(&crs
);
902 uinfop
->cui_rev
= (rdmsr(MSR_INTC_UCODE_REV
) >> INTC_UCODE_REV_SHIFT
);
906 ucode_extract_amd(ucode_update_t
*uusp
, uint8_t *ucodep
, int size
)
909 uint32_t *ptr
= (uint32_t *)ucodep
;
910 ucode_eqtbl_amd_t
*eqtbl
;
911 ucode_file_amd_t
*ufp
;
914 ucode_errno_t rc
= EM_NOMATCH
;
917 /* skip over magic number & equivalence table header */
920 count
= *ptr
++; size
-= 4;
921 for (eqtbl
= (ucode_eqtbl_amd_t
*)ptr
;
922 eqtbl
->ue_inst_cpu
&& eqtbl
->ue_inst_cpu
!= uusp
->sig
;
926 eq_sig
= eqtbl
->ue_equiv_cpu
;
928 /* No equivalent CPU id found, assume outdated microcode file. */
930 return (EM_HIGHERREV
);
932 /* Use the first microcode patch that matches. */
934 ptr
+= count
>> 2; size
-= count
;
937 return (higher
? EM_HIGHERREV
: EM_NOMATCH
);
940 count
= *ptr
++; size
-= 4;
941 ufp
= (ucode_file_amd_t
*)ptr
;
943 rc
= ucode_match_amd(eq_sig
, &uusp
->info
, ufp
, count
);
944 if (rc
== EM_HIGHERREV
)
946 } while (rc
!= EM_OK
);
948 uusp
->ucodep
= (uint8_t *)ufp
;
950 uusp
->expected_rev
= ufp
->uf_header
.uh_patch_id
;
953 * The hypervisor will choose the patch to load, so there is no way to
954 * know the "expected revision" in advance. This is especially true on
955 * mixed-revision systems where more than one patch will be loaded.
957 uusp
->expected_rev
= 0;
958 uusp
->ucodep
= ucodep
;
961 ucode_chipset_amd(ucodep
, size
);
968 ucode_extract_intel(ucode_update_t
*uusp
, uint8_t *ucodep
, int size
)
970 uint32_t header_size
= UCODE_HEADER_SIZE_INTEL
;
973 ucode_errno_t search_rc
= EM_NOMATCH
; /* search result */
976 * Go through the whole buffer in case there are
977 * multiple versions of matching microcode for this
980 for (remaining
= size
; remaining
> 0; ) {
981 int total_size
, body_size
, ext_size
;
982 uint8_t *curbuf
= &ucodep
[size
- remaining
];
983 ucode_header_intel_t
*uhp
= (ucode_header_intel_t
*)curbuf
;
984 ucode_ext_table_intel_t
*uetp
= NULL
;
987 total_size
= UCODE_TOTAL_SIZE_INTEL(uhp
->uh_total_size
);
988 body_size
= UCODE_BODY_SIZE_INTEL(uhp
->uh_body_size
);
989 ext_size
= total_size
- (header_size
+ body_size
);
992 uetp
= (ucode_ext_table_intel_t
*)
993 &curbuf
[header_size
+ body_size
];
995 tmprc
= ucode_match_intel(uusp
->sig
, &uusp
->info
, uhp
, uetp
);
998 * Since we are searching through a big file
999 * containing microcode for pretty much all the
1000 * processors, we are bound to get EM_NOMATCH
1001 * at one point. However, if we return
1002 * EM_NOMATCH to users, it will really confuse
1003 * them. Therefore, if we ever find a match of
1004 * a lower rev, we will set return code to
1007 if (tmprc
== EM_HIGHERREV
)
1008 search_rc
= EM_HIGHERREV
;
1010 if (tmprc
== EM_OK
&&
1011 uusp
->expected_rev
< uhp
->uh_rev
) {
1013 uusp
->ucodep
= (uint8_t *)&curbuf
[header_size
];
1015 uusp
->ucodep
= (uint8_t *)curbuf
;
1018 UCODE_TOTAL_SIZE_INTEL(uhp
->uh_total_size
);
1019 uusp
->expected_rev
= uhp
->uh_rev
;
1023 remaining
-= total_size
;
1032 * Entry point to microcode update from the ucode_drv driver.
1034 * Returns EM_OK on success, corresponding error code on failure.
1037 ucode_update(uint8_t *ucodep
, int size
)
1041 ucode_update_t cached
= { 0 };
1042 ucode_update_t
*cachedp
= NULL
;
1043 ucode_errno_t rc
= EM_OK
;
1044 ucode_errno_t search_rc
= EM_NOMATCH
; /* search result */
1049 CPUSET_ZERO(cpuset
);
1051 if (!ucode
->capable(CPU
))
1054 mutex_enter(&cpu_lock
);
1056 for (id
= 0; id
< max_ncpus
; id
++) {
1058 ucode_update_t uus
= { 0 };
1059 ucode_update_t
*uusp
= &uus
;
1062 * If there is no such CPU or it is not xcall ready, skip it.
1064 if ((cpu
= cpu_get(id
)) == NULL
||
1065 !(cpu
->cpu_flags
& CPU_READY
))
1068 uusp
->sig
= cpuid_getsig(cpu
);
1069 bcopy(cpu
->cpu_m
.mcpu_ucode_info
, &uusp
->info
,
1070 sizeof (uusp
->info
));
1073 * If the current CPU has the same signature and platform
1074 * id as the previous one we processed, reuse the information.
1076 if (cachedp
&& cachedp
->sig
== cpuid_getsig(cpu
) &&
1077 cachedp
->info
.cui_platid
== uusp
->info
.cui_platid
) {
1078 uusp
->ucodep
= cachedp
->ucodep
;
1079 uusp
->expected_rev
= cachedp
->expected_rev
;
1081 * Intuitively we should check here to see whether the
1082 * running microcode rev is >= the expected rev, and
1083 * quit if it is. But we choose to proceed with the
1084 * xcall regardless of the running version so that
1085 * the other threads in an HT processor can update
1086 * the cpu_ucode_info structure in machcpu.
1088 } else if ((search_rc
= ucode
->extract(uusp
, ucodep
, size
))
1090 bcopy(uusp
, &cached
, sizeof (cached
));
1096 if (uusp
->ucodep
== NULL
)
1101 * for i86xpv, the hypervisor will update all the CPUs.
1102 * the hypervisor wants the header, data, and extended
1103 * signature tables. ucode_write will just read in the
1104 * updated version on all the CPUs after the update has
1108 ucode_load_xpv(uusp
);
1112 CPUSET_ADD(cpuset
, id
);
1114 xc_sync((xc_arg_t
)uusp
, 0, 0, CPUSET2BV(cpuset
), ucode_write
);
1116 CPUSET_DEL(cpuset
, id
);
1118 if (uusp
->new_rev
!= 0 && uusp
->info
.cui_rev
== uusp
->new_rev
&&
1119 !ucode_force_update
) {
1121 } else if ((uusp
->new_rev
== 0) || (uusp
->expected_rev
!= 0 &&
1122 uusp
->expected_rev
!= uusp
->new_rev
)) {
1123 cmn_err(CE_WARN
, ucode_failure_fmt
,
1124 id
, uusp
->info
.cui_rev
, uusp
->expected_rev
);
1127 cmn_err(CE_CONT
, ucode_success_fmt
,
1128 id
, uusp
->info
.cui_rev
, uusp
->new_rev
);
1132 mutex_exit(&cpu_lock
);
1141 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
1142 * This is the entry point from boot path where pointer to CPU structure
1145 * cpuid_info must be initialized before ucode_check can be called.
1148 ucode_check(cpu_t
*cp
)
1150 cpu_ucode_info_t
*uinfop
;
1151 ucode_errno_t rc
= EM_OK
;
1152 uint32_t new_rev
= 0;
1156 * Space statically allocated for BSP, ensure pointer is set
1158 if (cp
->cpu_id
== 0 && cp
->cpu_m
.mcpu_ucode_info
== NULL
)
1159 cp
->cpu_m
.mcpu_ucode_info
= &cpu_ucode_info0
;
1161 uinfop
= cp
->cpu_m
.mcpu_ucode_info
;
1164 /* set up function pointers if not already done */
1166 switch (cpuid_getvendor(cp
)) {
1167 case X86_VENDOR_AMD
:
1170 case X86_VENDOR_Intel
:
1171 ucode
= &ucode_intel
;
1178 if (!ucode
->capable(cp
))
1182 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
1183 * (Family 6, model 5 and above) and all processors after.
1185 if ((cpuid_getvendor(cp
) == X86_VENDOR_Intel
) &&
1186 ((cpuid_getmodel(cp
) >= 5) || (cpuid_getfamily(cp
) > 6))) {
1187 uinfop
->cui_platid
= 1 << ((rdmsr(MSR_INTC_PLATFORM_ID
) >>
1188 INTC_PLATFORM_ID_SHIFT
) & INTC_PLATFORM_ID_MASK
);
1191 ucode
->read_rev(uinfop
);
1195 * for i86xpv, the hypervisor will update all the CPUs. We only need
1196 * do do this on one of the CPUs (and there always is a CPU 0).
1198 if (cp
->cpu_id
!= 0) {
1204 * Check to see if we need ucode update
1206 if ((rc
= ucode
->locate(cp
, uinfop
, &ucodefile
)) == EM_OK
) {
1207 new_rev
= ucode
->load(&ucodefile
, uinfop
, cp
);
1209 if (uinfop
->cui_rev
!= new_rev
)
1210 cmn_err(CE_WARN
, ucode_failure_fmt
, cp
->cpu_id
,
1211 uinfop
->cui_rev
, new_rev
);
1215 * If we fail to find a match for any reason, free the file structure
1216 * just in case we have read in a partial file.
1218 * Since the scratch memory for holding the microcode for the boot CPU
1219 * came from BOP_ALLOC, we will reset the data structure as if we
1220 * never did the allocation so we don't have to keep track of this
1221 * special chunk of memory. We free the memory used for the rest
1222 * of the CPUs in start_other_cpus().
1224 if (rc
!= EM_OK
|| cp
->cpu_id
== 0)
1225 ucode
->file_reset(&ucodefile
, cp
->cpu_id
);
1229 * Returns microcode revision from the machcpu structure.
1232 ucode_get_rev(uint32_t *revp
)
1239 if (!ucode
->capable(CPU
))
1242 mutex_enter(&cpu_lock
);
1243 for (i
= 0; i
< max_ncpus
; i
++) {
1246 if ((cpu
= cpu_get(i
)) == NULL
)
1249 revp
[i
] = cpu
->cpu_m
.mcpu_ucode_info
->cui_rev
;
1251 mutex_exit(&cpu_lock
);