/* coreboot: src/mainboard/emulation/qemu-q35/smihandler.c */
/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/amd/amd64_save_state.h>
#include <cpu/x86/legacy_save_state.h>
#include <cpu/x86/save_state.h>
#include <cpu/x86/smm.h>
#include <elog.h>
#include <smmstore.h>
#include <southbridge/intel/common/pmbase.h>
#include <southbridge/intel/common/pmutil.h>
#include <string.h>
/*
 * SMM in QEMU is unlike that on real hardware. Most notable differences:
 *
 * - Revision ID is either 0x20000 or 0x20064, depending on whether
 *   qemu-system-i386 or qemu-system-x86_64 is being used.
 * - SMI_STS is always 0.
 * - Since I/O Instruction Restart bit in revision ID field is not set, none of
 *   the fields related to I/O instructions is set in saved state. It is
 *   impossible to check if SMI was generated by write to APMC port that way.
 * - On older versions of QEMU, SMI isn't immediately emulated when Tiny Code
 *   Generator (TCG) accelerator is used, due to how Translation Blocks (TBs)
 *   are built. This means that contents of registers may change before SMI
 *   handler gets invoked. This can be worked around on the caller side by
 *   either writing to APMC port from a non-inlined function that doesn't
 *   return a result (so RAX isn't overwritten), or by following write to port
 *   with an instruction that forces generation of a new TB, e.g. 'pause'. In
 *   both cases, RIP in the save state will not point to the instruction
 *   directly following 'out'. When KVM is used, or in newer QEMU (8.2.2 is
 *   known to work) SMIs are injected immediately and RIP represents next
 *   instruction after `out`.
 */
35 static const uint32_t amd64_revisions[] = {
36 0x00020064,
37 SMM_REV_INVALID,
40 static int amd64_get_reg(const enum cpu_reg reg, const int node, void *out,
41 const uint8_t length)
43 amd64_smm_state_save_area_t *save_state = smm_get_save_state(node);
45 if (length != 1 && length != 2 && length != 4 && length != 8)
46 return -1;
48 switch (reg) {
49 case RAX:
50 memcpy(out, &save_state->rax, length);
51 return 0;
52 case RCX:
53 memcpy(out, &save_state->rcx, length);
54 return 0;
55 case RDX:
56 memcpy(out, &save_state->rdx, length);
57 return 0;
58 case RBX:
59 memcpy(out, &save_state->rbx, length);
60 return 0;
63 return -1;
66 static int amd64_set_reg(const enum cpu_reg reg, const int node, void *in,
67 const uint8_t length)
69 amd64_smm_state_save_area_t *save_state = smm_get_save_state(node);
71 if (length != 1 && length != 2 && length != 4 && length != 8)
72 return -1;
74 switch (reg) {
75 case RAX:
76 save_state->rax = 0;
77 memcpy(&save_state->rax, in, length);
78 return 0;
79 case RCX:
80 save_state->rcx = 0;
81 memcpy(&save_state->rcx, in, length);
82 return 0;
83 case RDX:
84 save_state->rdx = 0;
85 memcpy(&save_state->rdx, in, length);
86 return 0;
87 case RBX:
88 save_state->rbx = 0;
89 memcpy(&save_state->rbx, in, length);
90 return 0;
93 return -1;
96 static int amd64_apmc_node(u8 cmd)
98 amd64_smm_state_save_area_t *save_state;
99 int node;
101 for (node = 0; (unsigned int)node < CONFIG_MAX_CPUS; node++) {
102 save_state = smm_get_save_state(node);
104 if (!save_state)
105 continue;
108 * Since fields related to I/O instructions are not filled in, check
109 * RAX against command number only. There is 1/256 probability of false
110 * positive.
112 * The alternative would be to:
113 * - parse saved CR0 and EFER to discover host's execution mode
114 * - parse saved CR3 and host page tables to obtain physical address
115 * corresponding to RIP
116 * - map that page (or multiple, potentially nonconsecutive pages) that
117 * cover the code
118 * - analyze the code and saved state against one of the `out`
119 * instructions to APM_CNT port
120 * - ideally do so in constant-time manner to not leak information
122 if ((save_state->rax & 0xFF) == cmd)
123 return node;
126 return -1;
129 static const struct smm_save_state_ops _amd64_ops = {
130 .revision_table = amd64_revisions,
131 .get_reg = amd64_get_reg,
132 .set_reg = amd64_set_reg,
133 .apmc_node = amd64_apmc_node,
136 const struct smm_save_state_ops *amd64_ops = &_amd64_ops;
138 static const uint32_t legacy_revisions[] = {
139 0x00020000,
140 SMM_REV_INVALID,
143 static int legacy_get_reg(const enum cpu_reg reg, const int node, void *out,
144 const uint8_t length)
146 legacy_smm_state_save_area_t *save_state = smm_get_save_state(node);
148 if (length != 1 && length != 2 && length != 4)
149 return -1;
151 switch (reg) {
152 case RAX:
153 memcpy(out, &save_state->eax, length);
154 return 0;
155 case RCX:
156 memcpy(out, &save_state->ecx, length);
157 return 0;
158 case RDX:
159 memcpy(out, &save_state->edx, length);
160 return 0;
161 case RBX:
162 memcpy(out, &save_state->ebx, length);
163 return 0;
166 return -1;
169 static int legacy_set_reg(const enum cpu_reg reg, const int node, void *in,
170 const uint8_t length)
172 legacy_smm_state_save_area_t *save_state = smm_get_save_state(node);
174 if (length != 1 && length != 2 && length != 4)
175 return -1;
177 switch (reg) {
178 case RAX:
179 save_state->eax = 0;
180 memcpy(&save_state->eax, in, length);
181 return 0;
182 case RCX:
183 save_state->ecx = 0;
184 memcpy(&save_state->ecx, in, length);
185 return 0;
186 case RDX:
187 save_state->edx = 0;
188 memcpy(&save_state->edx, in, length);
189 return 0;
190 case RBX:
191 save_state->ebx = 0;
192 memcpy(&save_state->ebx, in, length);
193 return 0;
196 return -1;
199 static int legacy_apmc_node(u8 cmd)
201 legacy_smm_state_save_area_t *save_state;
202 int node;
204 for (node = 0; (unsigned int)node < CONFIG_MAX_CPUS; node++) {
205 save_state = smm_get_save_state(node);
207 if (!save_state)
208 continue;
211 * Since fields related to I/O instructions are not filled in, check
212 * EAX against command number only. There is 1/256 probability of false
213 * positive.
215 * See comment in amd64_apmc_node().
217 if ((save_state->eax & 0xFF) == cmd)
218 return node;
221 return -1;
224 static const struct smm_save_state_ops _legacy_ops = {
225 .revision_table = legacy_revisions,
226 .get_reg = legacy_get_reg,
227 .set_reg = legacy_set_reg,
228 .apmc_node = legacy_apmc_node,
231 const struct smm_save_state_ops *legacy_ops = &_legacy_ops;
233 static void mainboard_smi_gsmi(void)
235 u32 ret;
236 u8 sub_command;
237 uintptr_t param;
238 int node = get_apmc_node(APM_CNT_ELOG_GSMI);
240 if (node < 0)
241 return;
243 /* Command and return value in EAX */
244 if (get_save_state_reg(RAX, node, &ret, sizeof(ret)))
245 return;
247 sub_command = (u8)(ret >> 8);
249 /* Parameter buffer in EBX */
250 if (get_save_state_reg(RBX, node, &param, sizeof(param)))
251 return;
253 /* drivers/elog/gsmi.c */
254 ret = gsmi_exec(sub_command, (u32 *)param);
256 set_save_state_reg(RAX, node, &ret, sizeof(ret));
259 static void mainboard_smi_store(void)
261 u32 ret;
262 u8 sub_command;
263 uintptr_t reg_rbx;
264 int node = get_apmc_node(APM_CNT_SMMSTORE);
266 if (node < 0)
267 return;
269 /* Command and return value in EAX */
270 if (get_save_state_reg(RAX, node, &ret, sizeof(ret)))
271 return;
273 sub_command = (u8)(ret >> 8);
275 /* Parameter buffer in EBX */
276 if (get_save_state_reg(RBX, node, &reg_rbx, sizeof(reg_rbx)))
277 return;
279 /* drivers/smmstore/smi.c */
280 ret = smmstore_exec(sub_command, (void *)reg_rbx);
282 set_save_state_reg(RAX, node, &ret, sizeof(ret));
/* Set once APM_CNT_FINALIZE has been handled, so it only runs once. */
static int mainboard_finalized = 0;
287 void cpu_smi_handler(void)
289 u8 reg8;
291 reg8 = apm_get_apmc();
292 switch (reg8) {
293 case APM_CNT_ACPI_DISABLE:
294 write_pmbase32(PM1_CNT, read_pmbase32(PM1_CNT) & ~SCI_EN);
295 break;
296 case APM_CNT_ACPI_ENABLE:
297 write_pmbase32(PM1_CNT, read_pmbase32(PM1_CNT) | SCI_EN);
298 break;
299 case APM_CNT_FINALIZE:
300 if (mainboard_finalized) {
301 printk(BIOS_DEBUG, "SMI#: Already finalized\n");
302 return;
305 southbridge_finalize_all();
306 mainboard_finalized = 1;
307 break;
308 case APM_CNT_ELOG_GSMI:
309 if (CONFIG(ELOG_GSMI))
310 mainboard_smi_gsmi();
311 break;
312 case APM_CNT_SMMSTORE:
313 if (CONFIG(SMMSTORE))
314 mainboard_smi_store();
315 break;