/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <cpu/amd/amd64_save_state.h>
#include <cpu/x86/legacy_save_state.h>
#include <cpu/x86/save_state.h>
#include <cpu/x86/smm.h>
#include <elog.h>
#include <smmstore.h>
#include <southbridge/intel/common/pmbase.h>
#include <southbridge/intel/common/pmutil.h>

/*
 * SMM in QEMU is unlike that on real hardware. Most notable differences:
 *
 * - The revision ID is either 0x20000 or 0x20064, depending on whether
 *   qemu-system-i386 or qemu-system-x86_64 is being used.
 * - SMI_STS is always 0.
 * - Since the I/O Instruction Restart bit in the revision ID field is not set,
 *   none of the fields related to I/O instructions is set in the saved state.
 *   It is impossible to check that way whether an SMI was generated by a write
 *   to the APMC port.
 * - On older versions of QEMU, an SMI isn't emulated immediately when the Tiny
 *   Code Generator (TCG) accelerator is used, due to how Translation Blocks
 *   (TBs) are built. This means that the contents of registers may change
 *   before the SMI handler gets invoked. This can be worked around on the
 *   caller side (see the sketch below this comment) either by writing to the
 *   APMC port from a non-inlined function that doesn't return a result (so
 *   RAX isn't overwritten), or by following the write to the port with an
 *   instruction that forces generation of a new TB, e.g. 'pause'. In both
 *   cases, RIP in the save state will not point to the instruction directly
 *   following 'out'. When KVM is used, or in newer QEMU (8.2.2 is known to
 *   work), SMIs are injected immediately and RIP points to the next
 *   instruction after 'out'.
 */
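
/*
 * Illustrative sketch of the caller-side workarounds described above (an
 * assumption for clarity, not code used by this handler). The helper names
 * are hypothetical; outb() would come from <arch/io.h> and APM_CNT from
 * <cpu/x86/smm.h>.
 */
#if 0
/* Variant 1: a non-inlined helper that returns nothing, so the command byte
   stays in AL/RAX until the SMI is actually delivered. */
static void __attribute__((noinline)) apmc_trigger(u8 cmd)
{
	outb(cmd, APM_CNT);
}

/* Variant 2: 'pause' forces the end of the current Translation Block right
   after the write, so TCG injects the pending SMI before register contents
   can change. */
static void apmc_trigger_pause(u8 cmd)
{
	outb(cmd, APM_CNT);
	asm volatile ("pause");
}
#endif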

static const uint32_t amd64_revisions[] = {
	0x00020064,	/* Revision reported by qemu-system-x86_64 */
	SMM_REV_INVALID,
};

static int amd64_get_reg(const enum cpu_reg reg, const int node, void *out,
			 const uint8_t length)
{
	amd64_smm_state_save_area_t *save_state = smm_get_save_state(node);

	if (length != 1 && length != 2 && length != 4 && length != 8)
		return -1;

	switch (reg) {
	case RAX:
		memcpy(out, &save_state->rax, length);
		return 0;
	case RCX:
		memcpy(out, &save_state->rcx, length);
		return 0;
	case RDX:
		memcpy(out, &save_state->rdx, length);
		return 0;
	case RBX:
		memcpy(out, &save_state->rbx, length);
		return 0;
	}

	return -1;
}

static int amd64_set_reg(const enum cpu_reg reg, const int node, void *in,
			 const uint8_t length)
{
	amd64_smm_state_save_area_t *save_state = smm_get_save_state(node);

	if (length != 1 && length != 2 && length != 4 && length != 8)
		return -1;

	switch (reg) {
	case RAX:
		save_state->rax = 0;
		memcpy(&save_state->rax, in, length);
		return 0;
	case RCX:
		save_state->rcx = 0;
		memcpy(&save_state->rcx, in, length);
		return 0;
	case RDX:
		save_state->rdx = 0;
		memcpy(&save_state->rdx, in, length);
		return 0;
	case RBX:
		save_state->rbx = 0;
		memcpy(&save_state->rbx, in, length);
		return 0;
	}

	return -1;
}

static int amd64_apmc_node(u8 cmd)
{
	amd64_smm_state_save_area_t *save_state;
	int node;

	for (node = 0; (unsigned int)node < CONFIG_MAX_CPUS; node++) {
		save_state = smm_get_save_state(node);

		if (!save_state)
			continue;

		/*
		 * Since fields related to I/O instructions are not filled in,
		 * check RAX against the command number only. There is a 1/256
		 * probability of a false positive.
		 *
		 * The alternative would be to:
		 * - parse saved CR0 and EFER to discover the host's execution
		 *   mode
		 * - parse saved CR3 and host page tables to obtain the
		 *   physical address corresponding to RIP
		 * - map that page (or multiple, potentially nonconsecutive
		 *   pages) that holds the code
		 * - analyze the code and saved state against one of the `out`
		 *   instructions to the APM_CNT port
		 * - ideally do so in a constant-time manner to not leak
		 *   information
		 */
		if ((save_state->rax & 0xFF) == cmd)
			return node;
	}

	return -1;
}

static const struct smm_save_state_ops _amd64_ops = {
	.revision_table = amd64_revisions,
	.get_reg = amd64_get_reg,
	.set_reg = amd64_set_reg,
	.apmc_node = amd64_apmc_node,
};

const struct smm_save_state_ops *amd64_ops = &_amd64_ops;

static const uint32_t legacy_revisions[] = {
	0x00020000,	/* Revision reported by qemu-system-i386 */
	SMM_REV_INVALID,
};

static int legacy_get_reg(const enum cpu_reg reg, const int node, void *out,
			  const uint8_t length)
{
	legacy_smm_state_save_area_t *save_state = smm_get_save_state(node);

	if (length != 1 && length != 2 && length != 4)
		return -1;

	switch (reg) {
	case RAX:
		memcpy(out, &save_state->eax, length);
		return 0;
	case RCX:
		memcpy(out, &save_state->ecx, length);
		return 0;
	case RDX:
		memcpy(out, &save_state->edx, length);
		return 0;
	case RBX:
		memcpy(out, &save_state->ebx, length);
		return 0;
	}

	return -1;
}

static int legacy_set_reg(const enum cpu_reg reg, const int node, void *in,
			  const uint8_t length)
{
	legacy_smm_state_save_area_t *save_state = smm_get_save_state(node);

	if (length != 1 && length != 2 && length != 4)
		return -1;

	switch (reg) {
	case RAX:
		save_state->eax = 0;
		memcpy(&save_state->eax, in, length);
		return 0;
	case RCX:
		save_state->ecx = 0;
		memcpy(&save_state->ecx, in, length);
		return 0;
	case RDX:
		save_state->edx = 0;
		memcpy(&save_state->edx, in, length);
		return 0;
	case RBX:
		save_state->ebx = 0;
		memcpy(&save_state->ebx, in, length);
		return 0;
	}

	return -1;
}

static int legacy_apmc_node(u8 cmd)
{
	legacy_smm_state_save_area_t *save_state;
	int node;

	for (node = 0; (unsigned int)node < CONFIG_MAX_CPUS; node++) {
		save_state = smm_get_save_state(node);

		if (!save_state)
			continue;

		/*
		 * Since fields related to I/O instructions are not filled in,
		 * check EAX against the command number only. There is a 1/256
		 * probability of a false positive.
		 *
		 * See comment in amd64_apmc_node().
		 */
		if ((save_state->eax & 0xFF) == cmd)
			return node;
	}

	return -1;
}

static const struct smm_save_state_ops _legacy_ops = {
	.revision_table = legacy_revisions,
	.get_reg = legacy_get_reg,
	.set_reg = legacy_set_reg,
	.apmc_node = legacy_apmc_node,
};

const struct smm_save_state_ops *legacy_ops = &_legacy_ops;
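
/*
 * A minimal sketch (an assumption, not the actual generic save-state code) of
 * how a consumer could pick between the two exported ops tables by matching
 * the revision ID read from the save state against each revision_table. The
 * function name find_ops() is hypothetical.
 */
#if 0
static const struct smm_save_state_ops *find_ops(uint32_t revision)
{
	const struct smm_save_state_ops *all_ops[] = { amd64_ops, legacy_ops };

	for (size_t i = 0; i < ARRAY_SIZE(all_ops); i++)
		for (const uint32_t *rev = all_ops[i]->revision_table;
		     *rev != SMM_REV_INVALID; rev++)
			if (*rev == revision)
				return all_ops[i];

	return NULL;
}
#endif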

static void mainboard_smi_gsmi(void)
{
	u8 sub_command;
	u32 ret;
	uintptr_t param;
	int node = get_apmc_node(APM_CNT_ELOG_GSMI);

	if (node < 0)
		return;

	/* Command and return value in EAX */
	if (get_save_state_reg(RAX, node, &ret, sizeof(ret)))
		return;

	sub_command = (u8)(ret >> 8);

	/* Parameter buffer in EBX */
	if (get_save_state_reg(RBX, node, &param, sizeof(param)))
		return;

	/* drivers/elog/gsmi.c */
	ret = gsmi_exec(sub_command, (u32 *)param);

	set_save_state_reg(RAX, node, &ret, sizeof(ret));
}

static void mainboard_smi_store(void)
{
	u8 sub_command;
	u32 ret;
	uintptr_t reg_rbx;
	int node = get_apmc_node(APM_CNT_SMMSTORE);

	if (node < 0)
		return;

	/* Command and return value in EAX */
	if (get_save_state_reg(RAX, node, &ret, sizeof(ret)))
		return;

	sub_command = (u8)(ret >> 8);

	/* Parameter buffer in EBX */
	if (get_save_state_reg(RBX, node, &reg_rbx, sizeof(reg_rbx)))
		return;

	/* drivers/smmstore/smi.c */
	ret = smmstore_exec(sub_command, (void *)reg_rbx);

	set_save_state_reg(RAX, node, &ret, sizeof(ret));
}
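
/*
 * A minimal sketch of the caller side of the register protocol handled above
 * (an assumption for illustration; coreboot's real callers live outside this
 * file). The APMC command goes in AL, the sub-command in AH and the address
 * of the parameter buffer in EBX; the handler writes its result back into
 * EAX. The helper name call_smmstore() is hypothetical.
 */
#if 0
static u32 call_smmstore(u8 sub_command, void *param)
{
	u32 eax = ((u32)sub_command << 8) | APM_CNT_SMMSTORE;

	__asm__ __volatile__ (
		"outb %%al, %%dx\n\t"
		"pause"		/* See the TCG note at the top of this file */
		: "+a" (eax)
		: "b" ((u32)(uintptr_t)param), "d" ((u16)APM_CNT)
		: "memory");

	return eax;
}
#endif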

static int mainboard_finalized = 0;

void cpu_smi_handler(void)
{
	u8 reg8;

	reg8 = apm_get_apmc();
	switch (reg8) {
	case APM_CNT_ACPI_DISABLE:
		write_pmbase32(PM1_CNT, read_pmbase32(PM1_CNT) & ~SCI_EN);
		break;
	case APM_CNT_ACPI_ENABLE:
		write_pmbase32(PM1_CNT, read_pmbase32(PM1_CNT) | SCI_EN);
		break;
	case APM_CNT_FINALIZE:
		if (mainboard_finalized) {
			printk(BIOS_DEBUG, "SMI#: Already finalized\n");
			break;
		}

		southbridge_finalize_all();
		mainboard_finalized = 1;
		break;
	case APM_CNT_ELOG_GSMI:
		if (CONFIG(ELOG_GSMI))
			mainboard_smi_gsmi();
		break;
	case APM_CNT_SMMSTORE:
		if (CONFIG(SMMSTORE))
			mainboard_smi_store();