/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi.h>
#include <arch/io.h>
#include <console/console.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/msr.h>
#include <delay.h>
#include <device/mmio.h>
#include <device/pci_def.h>
#include <device/pci_ops.h>
#include <elog.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/oc_wdt.h>
#include <intelblocks/pmclib.h>
#include <intelblocks/smihandler.h>
#include <intelblocks/tco.h>
#include <intelblocks/uart.h>
#include <smmstore.h>
#include <soc/nvs.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <soc/gpio.h>
#include <soc/iomap.h>
#include <soc/smbus.h>
#include <spi-generic.h>
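
/*
 * Default to the EM64T101 save state layout; SoCs using a different save
 * state revision override this hook.
 */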
__weak const struct smm_save_state_ops *get_smm_save_state_ops(void)
{
	return &em64t101_smm_ops;
}

/* Specific SOC SMI handler during ramstage finalize phase */
__weak void smihandler_soc_at_finalize(void)
{
	/* Default: nothing to do */
}

__weak int smihandler_soc_disable_busmaster(pci_devfn_t dev)
{
	/* Default: allow disabling bus mastering for every device */
	return 1;
}

/* Mainboard overrides. */

__weak void mainboard_smi_gpi_handler(const struct gpi_status *sts)
{
	/* Default: nothing to do */
}

__weak void mainboard_smi_espi_handler(void)
{
	/* Default: nothing to do */
}

/* Common Functions */
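
/*
 * Scan the save state of every CPU node for a synchronous IO write of the
 * requested command to the APMC port; return that save state, or NULL if
 * no node issued the command.
 */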
static void *find_save_state(const struct smm_save_state_ops *save_state_ops,
			     int cmd)
{
	int node;
	void *state;
	uint32_t io_misc_info;
	uint8_t reg_al;

	/* Check all nodes looking for the one that issued the IO */
	for (node = 0; node < CONFIG_MAX_CPUS; node++) {
		state = smm_get_save_state(node);
		io_misc_info = save_state_ops->get_io_misc_info(state);

		/* Check for Synchronous IO (bit0==1) */
		if (!(io_misc_info & (1 << 0)))
			continue;
		/* Make sure it was a write (bit4==0) */
		if (io_misc_info & (1 << 4))
			continue;
		/* Check for APMC IO port */
		if (((io_misc_info >> 16) & 0xff) != APM_CNT)
			continue;
		/* Check AL against the requested command */
		reg_al = save_state_ops->get_reg(state, RAX);
		if (reg_al == cmd)
			return state;
	}

	return NULL;
}

/* Inherited from cpu/x86/smm.h resulting in a different signature */
void southbridge_smi_set_eos(void)
{
	/* Re-arm SMI generation by setting the End Of SMI (EOS) bit in SMI_EN */
	pmc_enable_smi(EOS);
}
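
/*
 * Walk the given PCI bus and clear the bus master enable bit on every
 * device, recursing into subordinate buses behind bridges.
 */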
static void busmaster_disable_on_bus(int bus)
{
	int slot, func;
	uint32_t val;
	uint16_t reg16;
	uint8_t hdr;

	for (slot = 0; slot < 0x20; slot++) {
		for (func = 0; func < 8; func++) {
			pci_devfn_t dev = PCI_DEV(bus, slot, func);

			if (!smihandler_soc_disable_busmaster(dev))
				continue;
			val = pci_read_config32(dev, PCI_VENDOR_ID);

			if (val == 0xffffffff || val == 0x00000000 ||
			    val == 0x0000ffff || val == 0xffff0000)
				continue;

			/* Disable Bus Mastering for this one device */
			reg16 = pci_read_config16(dev, PCI_COMMAND);
			reg16 &= ~PCI_COMMAND_MASTER;
			pci_write_config16(dev, PCI_COMMAND, reg16);

			/* If it's not a bridge, move on. */
			hdr = pci_read_config8(dev, PCI_HEADER_TYPE);
			hdr &= 0x7f;
			if (hdr != PCI_HEADER_TYPE_BRIDGE &&
			    hdr != PCI_HEADER_TYPE_CARDBUS)
				continue;

			/*
			 * If secondary bus is equal to current bus bypass
			 * the bridge because it's likely unconfigured and
			 * would cause infinite recursion.
			 */
			int secbus = pci_read_config8(dev, PCI_SECONDARY_BUS);

			if (secbus == bus)
				continue;

			busmaster_disable_on_bus(secbus);
		}
	}
}
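
/*
 * Handler for the sleep SMI: the OS write of SLP_EN was trapped (SLP_SMI_EN),
 * so finish mainboard/elog handling here and then perform the actual sleep
 * entry by setting SLP_EN again.
 */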
void smihandler_southbridge_sleep(
	const struct smm_save_state_ops *save_state_ops)
{
	uint32_t reg32;
	uint8_t slp_typ;

	/* First, disable further SMIs */
	pmc_disable_smi(SLP_SMI_EN);
	/* Figure out SLP_TYP */
	reg32 = inl(ACPI_BASE_ADDRESS + PM1_CNT);
	printk(BIOS_SPEW, "SMI#: SLP = 0x%08x\n", reg32);
	slp_typ = acpi_sleep_from_pm1(reg32);

	/* Do any mainboard sleep handling */
	mainboard_smi_sleep(slp_typ);

	/* Log S3, S4, and S5 entry */
	if (slp_typ >= ACPI_S3)
		elog_gsmi_add_event_byte(ELOG_TYPE_ACPI_ENTER, slp_typ);

	/* Clear pending GPE events */
	pmc_clear_all_gpe_status();

	/* Next, do the deed. */
	switch (slp_typ) {
	case ACPI_S0:
		printk(BIOS_DEBUG, "SMI#: Entering S0 (On)\n");
		break;
	case ACPI_S3:
		printk(BIOS_DEBUG, "SMI#: Entering S3 (Suspend-To-RAM)\n");
		if (CONFIG(SOC_INTEL_COMMON_BLOCK_UART))
			gnvs->uior = uart_is_controller_initialized();

		/* Invalidate the cache before going to S3 */
		wbinvd();
		break;
	case ACPI_S4:
		printk(BIOS_DEBUG, "SMI#: Entering S4 (Suspend-To-Disk)\n");
		break;
	case ACPI_S5:
		printk(BIOS_DEBUG, "SMI#: Entering S5 (Soft Power off)\n");
		/* Disable all GPE */
		pmc_disable_all_gpe();
		/* Set which state system will be after power reapplied */
		pmc_set_power_failure_state(false);
		/* also iterates over all bridges on bus 0 */
		busmaster_disable_on_bus(0);

		/*
		 * Some platforms (e.g. Chromebooks) have observed race between
		 * SLP SMI and PWRBTN SMI because of the way these SMIs are
		 * triggered on power button press. Allow adding a delay before
		 * triggering sleep enable for S5, so that power button
		 * interrupt does not result into immediate wake.
		 */
		mdelay(CONFIG_SOC_INTEL_COMMON_BLOCK_SMM_S5_DELAY_MS);

		/*
		 * Ensure any pending power button status bits are cleared as
		 * the system is entering S5 and doesn't want to be woken up
		 * right away from older power button events.
		 */
		pmc_clear_pm1_status();
		break;
	default:
		printk(BIOS_DEBUG, "SMI#: ERROR: SLP_TYP reserved\n");
		break;
	}

	/*
	 * Write back to the SLP register to cause the originally intended
	 * event again. We need to set BIT13 (SLP_EN) though to make the
	 * sleep happen.
	 */
	pmc_enable_pm1_control(SLP_EN);

	/* Make sure to stop executing code here for S3/S4/S5 */
	if (slp_typ >= ACPI_S3)
		hlt();

	/*
	 * In most sleep states, the code flow of this function ends at
	 * the line above. However, if we entered sleep state S1 and wake
	 * up again, we will continue to execute code in this function.
	 */
	if (pmc_read_pm1_control() & SCI_EN) {
		/* The OS is not an ACPI OS, so we set the state to S0 */
		pmc_disable_pm1_control(SLP_EN | SLP_TYP);
	}
}
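
/*
 * APM_CNT_ELOG_GSMI: pass the sub command and parameter buffer from the
 * calling CPU's save state to the GSMI event log driver and return its
 * result in RAX.
 */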
static void southbridge_smi_gsmi(
	const struct smm_save_state_ops *save_state_ops)
{
	u8 sub_command;
	u32 ret;
	void *io_smi;
	uint32_t reg_ebx;

	io_smi = find_save_state(save_state_ops, APM_CNT_ELOG_GSMI);
	if (!io_smi)
		return;

	/* Command and return value in EAX */
	sub_command = (save_state_ops->get_reg(io_smi, RAX) >> 8) & 0xff;

	/* Parameter buffer in EBX */
	reg_ebx = save_state_ops->get_reg(io_smi, RBX);

	/* drivers/elog/gsmi.c */
	ret = gsmi_exec(sub_command, &reg_ebx);
	save_state_ops->set_reg(io_smi, RAX, ret);
}
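
/*
 * Set or clear the InSMM.STS indication (via MSR_SPCL_CHIPSET_USAGE) so that
 * BIOS flash writes are allowed or disallowed while executing in SMM.
 */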
static void set_insmm_sts(const bool enable_writes)
{
	msr_t msr = {
		.lo = read32p(0xfed30880),
		.hi = 0,
	};

	if (enable_writes)
		msr.lo |= 1;
	else
		msr.lo &= ~1;

	wrmsr(MSR_SPCL_CHIPSET_USAGE, msr);
}
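
/*
 * APM_CNT_SMMSTORE: temporarily lift the SPI write protection if needed,
 * execute the SMMSTORE sub command and restore the protection afterwards.
 */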
static void southbridge_smi_store(
	const struct smm_save_state_ops *save_state_ops)
{
	u8 sub_command;
	u32 ret;
	void *io_smi;
	uint32_t reg_ebx;

	io_smi = find_save_state(save_state_ops, APM_CNT_SMMSTORE);
	if (!io_smi)
		return;

	/* Command and return value in EAX */
	sub_command = (save_state_ops->get_reg(io_smi, RAX) >> 8) & 0xff;

	/* Parameter buffer in EBX */
	reg_ebx = save_state_ops->get_reg(io_smi, RBX);

	const bool wp_enabled = !fast_spi_wpd_status();
	if (wp_enabled) {
		/*
		 * As per BWG, clearing "SPI_BIOS_CONTROL_SYNC_SS"
		 * bit is a must prior setting "SPI_BIOS_CONTROL_WPD" bit
		 * to avoid 3-strike error.
		 */
		fast_spi_clear_sync_smi_status();
		fast_spi_disable_wp();
		set_insmm_sts(true);
	}

	/* drivers/smmstore/smi.c */
	ret = smmstore_exec(sub_command, (void *)(uintptr_t)reg_ebx);
	save_state_ops->set_reg(io_smi, RAX, ret);

	if (wp_enabled) {
		fast_spi_enable_wp();
		set_insmm_sts(false);
	}
}

__weak const struct gpio_lock_config *soc_gpio_lock_config(size_t *num)
{
	*num = 0;
	return NULL;
}

static void soc_lock_gpios(void)
{
	const struct gpio_lock_config *soc_gpios;
	size_t soc_gpio_num;

	/* get list of gpios from SoC */
	soc_gpios = soc_gpio_lock_config(&soc_gpio_num);

	/* Lock any soc requested gpios */
	if (soc_gpio_num)
		gpio_lock_pads(soc_gpios, soc_gpio_num);
}
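
/*
 * One-shot finalization run on APM_CNT_FINALIZE: lock down SPI write
 * protection, GPIOs and SoC specific interfaces before the payload or OS
 * takes over.
 */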
static void finalize(void)
{
	static int finalize_done;

	if (finalize_done) {
		printk(BIOS_DEBUG, "SMM already finalized.\n");
		return;
	}
	finalize_done = 1;

	if (CONFIG(SPI_FLASH_SMM))
		/* Re-init SPI driver to handle locked BAR */
		fast_spi_init();

	if (CONFIG(BOOTMEDIA_SMM_BWP)) {
		fast_spi_enable_wp();
		set_insmm_sts(false);
	}

	/*
	 * HECI is disabled in smihandler_soc_at_finalize() which also locks down the side band
	 * interface. Some boards may require this interface in mainboard_smi_finalize(),
	 * therefore, this call must precede smihandler_soc_at_finalize().
	 */
	mainboard_smi_finalize();

	/* Lock down all GPIOs that may have been requested by the SoC and/or the mainboard. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SMM_LOCK_GPIO_PADS))
		soc_lock_gpios();

	/* Specific SOC SMI handler during ramstage finalize phase */
	smihandler_soc_at_finalize();
}
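
/*
 * APMC SMI: dispatch on the command byte written to the APM_CNT port
 * (ACPI enable/disable, GSMI, SMMSTORE, finalize) and then give the
 * mainboard a chance to handle the command as well.
 */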
void smihandler_southbridge_apmc(
	const struct smm_save_state_ops *save_state_ops)
{
	uint8_t reg8;

	reg8 = apm_get_apmc();
	switch (reg8) {
	case APM_CNT_ACPI_DISABLE:
		pmc_disable_pm1_control(SCI_EN);
		break;
	case APM_CNT_ACPI_ENABLE:
		pmc_enable_pm1_control(SCI_EN);
		break;
	case APM_CNT_ELOG_GSMI:
		if (CONFIG(ELOG_GSMI))
			southbridge_smi_gsmi(save_state_ops);
		break;
	case APM_CNT_SMMSTORE:
		if (CONFIG(SMMSTORE))
			southbridge_smi_store(save_state_ops);
		break;
	case APM_CNT_FINALIZE:
		finalize();
		break;
	}

	mainboard_smi_apmc(reg8);
}

void smihandler_southbridge_pm1(
	const struct smm_save_state_ops *save_state_ops)
{
	uint16_t pm1_sts = pmc_clear_pm1_status();
	u16 pm1_en = pmc_read_pm1_enable();

	/*
	 * While OSPM is not active, poweroff immediately
	 * on a power button event.
	 */
	if ((pm1_sts & PWRBTN_STS) && (pm1_en & PWRBTN_EN)) {
		/* power button pressed */
		elog_gsmi_add_event(ELOG_TYPE_POWER_BUTTON);
		pmc_disable_pm1_control(~0);
		pmc_enable_pm1_control(SLP_EN | (SLP_TYP_S5 << SLP_TYP_SHIFT));
	}
}

void smihandler_southbridge_gpe0(
	const struct smm_save_state_ops *save_state_ops)
{
	pmc_clear_all_gpe_status();
}

void smihandler_southbridge_tco(
	const struct smm_save_state_ops *save_state_ops)
{
	uint32_t tco_sts = pmc_clear_tco_status();

	/*
	 * SPI synchronous SMIs are TCO SMIs, but they do not have a status
	 * bit in the TCO_STS register. Furthermore, the TCO_STS bit in the
	 * SMI_STS register is continually set until the SMI handler clears
	 * the SPI synchronous SMI status bit in the SPI controller. To not
	 * risk missing any other TCO SMIs, do not clear the TCO_STS bit in
	 * this SMI handler invocation. If the TCO_STS bit remains set when
	 * returning from SMM, another SMI immediately happens which clears
	 * the TCO_STS bit and handles any pending events.
	 */
	fast_spi_clear_sync_smi_status();

	/* If enabled, enforce SMM BIOS write protection */
	if (CONFIG(BOOTMEDIA_SMM_BWP) && fast_spi_wpd_status()) {
		/*
		 * BWE is RW, so the SMI was caused by a
		 * write to BWE, not by a write to the BIOS.
		 *
		 * This is the place where we notice someone
		 * is trying to tinker with the BIOS. We are
		 * trying to be nice and just ignore it. A more
		 * resolute answer would be to power down the
		 * box.
		 */
		printk(BIOS_DEBUG, "Switching SPI back to RO\n");
		fast_spi_enable_wp();
		set_insmm_sts(false);
	}

	/* Any other TCO event to handle? */
	if (!tco_sts)
		return;

	if (tco_sts & TCO1_STS_TIMEOUT) { /* TIMEOUT */
		/* Handle TCO timeout */
		printk(BIOS_DEBUG, "TCO Timeout.\n");
	}

	if (tco_sts & (TCO2_INTRD_DET << 16)) { /* INTRUDER# assertion */
		/*
		 * Handle intrusion event
		 * If we ever get here, probably the case has been opened.
		 */
		printk(BIOS_CRIT, "Case intrusion detected.\n");
	}
}

void smihandler_southbridge_periodic(
	const struct smm_save_state_ops *save_state_ops)
{
	uint32_t reg32;

	reg32 = pmc_get_smi_en();

	/* Are periodic SMIs enabled? */
	if ((reg32 & PERIODIC_EN) == 0)
		return;

	printk(BIOS_DEBUG, "Periodic SMI.\n");

	if (CONFIG(SOC_INTEL_COMMON_OC_WDT_RELOAD_IN_PERIODIC_SMI))
		oc_wdt_reload();
}

void smihandler_southbridge_gpi(
	const struct smm_save_state_ops *save_state_ops)
{
	struct gpi_status smi_sts;

	gpi_clear_get_smi_status(&smi_sts);
	mainboard_smi_gpi_handler(&smi_sts);

	/* Clear again after mainboard handler */
	gpi_clear_get_smi_status(&smi_sts);
}

void smihandler_southbridge_espi(
	const struct smm_save_state_ops *save_state_ops)
{
	mainboard_smi_espi_handler();
}
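
/*
 * The southbridge_smi[] dispatch table is provided by the SoC code (see
 * <intelblocks/smihandler.h>); each entry corresponds to one SMI_STS bit.
 * A typical SoC table looks roughly like the sketch below (illustrative
 * only; the bit macro names vary per SoC and the real table lives in the
 * SoC's smihandler.c):
 *
 *	const smi_handler_t southbridge_smi[SMI_STS_BITS] = {
 *		[SMI_ON_SLP_EN_STS_BIT] = smihandler_southbridge_sleep,
 *		[APM_STS_BIT]           = smihandler_southbridge_apmc,
 *		[PM1_STS_BIT]           = smihandler_southbridge_pm1,
 *		[GPE0_STS_BIT]          = smihandler_southbridge_gpe0,
 *		[GPIO_STS_BIT]          = smihandler_southbridge_gpi,
 *		[TCO_STS_BIT]           = smihandler_southbridge_tco,
 *		[PERIODIC_STS_BIT]      = smihandler_southbridge_periodic,
 *		[ESPI_SMI_STS_BIT]      = smihandler_southbridge_espi,
 *	};
 */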
void southbridge_smi_handler(void)
{
	int i;
	uint32_t smi_sts;
	const struct smm_save_state_ops *save_state_ops;

	/*
	 * We need to clear the SMI status registers, or we won't see what's
	 * happening in the following calls.
	 */
	smi_sts = pmc_clear_smi_status();

	/*
	 * When the SCI_EN bit is set, PM1 and GPE0 events will trigger a SCI
	 * instead of a SMI#. However, SMI_STS bits PM1_STS and GPE0_STS can
	 * still be set. Therefore, when SCI_EN is set, ignore PM1 and GPE0
	 * events in the SMI# handler, as these events have triggered a SCI.
	 * Do not ignore any other SMI# types, since they cannot cause a SCI.
	 */
	if (pmc_read_pm1_control() & SCI_EN)
		smi_sts &= ~(1 << PM1_STS_BIT | 1 << GPE0_STS_BIT);

	if (!smi_sts)
		return;

	save_state_ops = get_smm_save_state_ops();

	/* Call SMI sub handler for each of the status bits */
	for (i = 0; i < ARRAY_SIZE(southbridge_smi); i++) {
		if (!(smi_sts & (1 << i)))
			continue;

		if (southbridge_smi[i] != NULL) {
			southbridge_smi[i](save_state_ops);
		} else {
			printk(BIOS_DEBUG,
			       "SMI_STS[%d] occurred, but no "
			       "handler available.\n", i);
		}
	}
}
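
/* Save state accessors for the EM64T100 layout */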
static uint32_t em64t100_smm_save_state_get_io_misc_info(void *state)
{
	em64t100_smm_state_save_area_t *smm_state = state;
	return smm_state->io_misc_info;
}

static uint64_t em64t100_smm_save_state_get_reg(void *state, enum smm_reg reg)
{
	uint64_t value = 0;
	em64t100_smm_state_save_area_t *smm_state = state;

	switch (reg) {
	case RAX:
		value = smm_state->rax;
		break;
	case RBX:
		value = smm_state->rbx;
		break;
	case RCX:
		value = smm_state->rcx;
		break;
	case RDX:
		value = smm_state->rdx;
		break;
	default:
		break;
	}

	return value;
}

static void em64t100_smm_save_state_set_reg(void *state, enum smm_reg reg,
					    uint64_t val)
{
	em64t100_smm_state_save_area_t *smm_state = state;

	switch (reg) {
	case RAX:
		smm_state->rax = val;
		break;
	case RBX:
		smm_state->rbx = val;
		break;
	case RCX:
		smm_state->rcx = val;
		break;
	case RDX:
		smm_state->rdx = val;
		break;
	default:
		break;
	}
}
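
/* Save state accessors for the EM64T101 layout */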
static uint32_t em64t101_smm_save_state_get_io_misc_info(void *state)
{
	em64t101_smm_state_save_area_t *smm_state = state;
	return smm_state->io_misc_info;
}

static uint64_t em64t101_smm_save_state_get_reg(void *state, enum smm_reg reg)
{
	uint64_t value = 0;
	em64t101_smm_state_save_area_t *smm_state = state;

	switch (reg) {
	case RAX:
		value = smm_state->rax;
		break;
	case RBX:
		value = smm_state->rbx;
		break;
	case RCX:
		value = smm_state->rcx;
		break;
	case RDX:
		value = smm_state->rdx;
		break;
	default:
		break;
	}

	return value;
}

static void em64t101_smm_save_state_set_reg(void *state, enum smm_reg reg,
					    uint64_t val)
{
	em64t101_smm_state_save_area_t *smm_state = state;

	switch (reg) {
	case RAX:
		smm_state->rax = val;
		break;
	case RBX:
		smm_state->rbx = val;
		break;
	case RCX:
		smm_state->rcx = val;
		break;
	case RDX:
		smm_state->rdx = val;
		break;
	default:
		break;
	}
}
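
/* Accessor tables; get_smm_save_state_ops() selects the one matching the CPU's save state revision */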
const struct smm_save_state_ops em64t100_smm_ops = {
	.get_io_misc_info = em64t100_smm_save_state_get_io_misc_info,
	.get_reg = em64t100_smm_save_state_get_reg,
	.set_reg = em64t100_smm_save_state_set_reg,
};

const struct smm_save_state_ops em64t101_smm_ops = {
	.get_io_misc_info = em64t101_smm_save_state_get_io_misc_info,
	.get_reg = em64t101_smm_save_state_get_reg,
	.set_reg = em64t101_smm_save_state_set_reg,
};