cpu/x86/(sipi|smm): Pass on CR3 from ramstage
src/include/cpu/x86/smm.h
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef CPU_X86_SMM_H
#define CPU_X86_SMM_H

#include <arch/cpu.h>
#include <commonlib/region.h>
#include <device/pci_type.h>
#include <device/resource.h>
#include <types.h>

#define SMM_DEFAULT_BASE 0x30000
#define SMM_DEFAULT_SIZE 0x10000

/* used only by C programs so far */
#define SMM_BASE 0xa0000

#define SMM_ENTRY_OFFSET 0x8000
#define SMM_SAVE_STATE_BEGIN(x) (SMM_ENTRY_OFFSET + (x))

#define APM_CNT 0xb2
#define APM_CNT_NOOP_SMI 0x00
#define APM_CNT_ACPI_DISABLE 0x1e
#define APM_CNT_ACPI_ENABLE 0xe1
#define APM_CNT_ROUTE_ALL_XHCI 0xca
#define APM_CNT_FINALIZE 0xcb
#define APM_CNT_LEGACY 0xcc
#define APM_CNT_MBI_UPDATE 0xeb
#define APM_CNT_SMMINFO 0xec
#define APM_CNT_SMMSTORE 0xed
#define APM_CNT_ELOG_GSMI 0xef
#define APM_STS 0xb3

#define SMM_PCI_RESOURCE_STORE_NUM_RESOURCES 6

/*
 * SMI Transfer Monitor (STM) descriptor reserved in SMM save state.
 */
#if CONFIG(STM)
#define STM_PSD_SIZE ALIGN_UP(sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR), 0x100)
#else
#define STM_PSD_SIZE 0
#endif

/* Send cmd to APM_CNT with HAVE_SMI_HANDLER checking. */
enum cb_err apm_control(u8 cmd);
u8 apm_get_apmc(void);
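/*
 * Illustrative use from ramstage (a sketch, not part of this API's contract):
 * writing a command byte to the APM control port raises a software SMI when
 * a handler has been installed.
 *
 *	if (apm_control(APM_CNT_FINALIZE) != CB_SUCCESS)
 *		printk(BIOS_ERR, "APM finalize SMI not sent\n");
 */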
void io_trap_handler(int smif);
int mainboard_io_trap_handler(int smif);

void southbridge_smi_set_eos(void);

void global_smi_enable(void);
void global_smi_enable_no_pwrbtn(void);

void cpu_smi_handler(void);
void northbridge_smi_handler(void);
void southbridge_smi_handler(void);

void mainboard_smi_gpi(u32 gpi_sts);
int mainboard_smi_apmc(u8 data);
void mainboard_smi_sleep(u8 slp_typ);
void mainboard_smi_finalize(void);
int mainboard_set_smm_log_level(void);
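/*
 * Mainboards override the weak mainboard_smi_* hooks above as needed. A
 * minimal sketch of an APMC hook (the cases shown are only examples):
 *
 *	int mainboard_smi_apmc(u8 data)
 *	{
 *		switch (data) {
 *		case APM_CNT_ACPI_ENABLE:
 *			break;	// e.g. hand SCI routing over to the OS
 *		case APM_CNT_ACPI_DISABLE:
 *			break;	// e.g. take SCI routing back for SMI use
 *		}
 *		return 0;
 *	}
 */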
void smm_soc_early_init(void);
void smm_soc_exit(void);

/* This is the SMM handler. */
extern unsigned char _binary_smm_start[];
extern unsigned char _binary_smm_end[];

struct smm_pci_resource_info {
	pci_devfn_t pci_addr;
	uint16_t vendor_id;
	uint16_t device_id;
	uint16_t class_device;
	uint8_t class_prog;
	struct resource resources[SMM_PCI_RESOURCE_STORE_NUM_RESOURCES];
};
struct smm_runtime {
	u32 smbase;
	u32 smm_size;
	u32 save_state_size;
	u32 num_cpus;
	u32 gnvs_ptr;
	u32 cbmemc_size;
	void *cbmemc;
#if CONFIG(SMM_PCI_RESOURCE_STORE)
	struct smm_pci_resource_info pci_resources[CONFIG_SMM_PCI_RESOURCE_STORE_NUM_SLOTS];
#endif
	uintptr_t save_state_top[CONFIG_MAX_CPUS];
	int smm_log_level;
} __packed;
struct smm_module_params {
	size_t cpu;
	/* A canary value that has been placed at the end of the stack.
	 * If (uintptr_t)canary != *canary then a stack overflow has occurred. */
	const uintptr_t *canary;
};
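/*
 * A handler can sanity-check the canary before trusting its stack; a sketch
 * (the printk severity is only an example):
 *
 *	static void check_canary(const struct smm_module_params *p)
 *	{
 *		if ((uintptr_t)p->canary != *p->canary)
 *			printk(BIOS_EMERG, "SMM stack overflow on CPU %zu\n", p->cpu);
 *	}
 */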
/* These parameters are used by the SMM stub code. A pointer to the params
 * is also passed to the C-based handler. */
struct smm_stub_params {
	u32 stack_size;
	u32 stack_top;
	u32 c_handler;
	u32 cr3;
	/* The apic_id_to_cpu provides a mapping from APIC id to CPU number.
	 * The CPU number is indicated by the index into the array by matching
	 * the default APIC id and value at the index. The stub loader
	 * initializes this array with a 1:1 mapping. If the APIC ids are not
	 * contiguous like the 1:1 mapping it is up to the caller of the stub
	 * loader to adjust this mapping. */
	u16 apic_id_to_cpu[CONFIG_MAX_CPUS];
} __packed;
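/*
 * On systems with a sparse APIC id space the caller of the stub loader
 * patches the default 1:1 map, roughly like this (cpu_get_apic_id() stands
 * in for however the platform resolves logical CPU -> APIC id):
 *
 *	for (int i = 0; i < CONFIG_MAX_CPUS; i++)
 *		stub_params->apic_id_to_cpu[i] = cpu_get_apic_id(i);
 */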
/* smm_handler_t is called with arg of smm_module_params pointer. */
typedef asmlinkage void (*smm_handler_t)(void *);

/* SMM Runtime helpers. */
#if ENV_SMM
extern struct global_nvs *gnvs;
#endif

/* Entry point for SMM modules. */
asmlinkage void smm_handler_start(void *params);

/* Retrieve SMM save state for a given CPU. WARNING: This does not take into
 * account CPUs which are configured to not save their state to RAM. */
void *smm_get_save_state(int cpu);
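/*
 * The returned pointer must be cast to the chipset-specific save state
 * layout by the caller; a sketch assuming an em64t101-style layout:
 *
 *	em64t101_smm_state_save_area_t *state = smm_get_save_state(cpu);
 *	const uint32_t eax = (uint32_t)state->rax;
 */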
/* Returns true if the region overlaps with the SMM */
bool smm_region_overlaps_handler(const struct region *r);

/* Returns true if the memory pointed to overlaps with SMM reserved memory. */
static inline bool smm_points_to_smram(const void *ptr, const size_t len)
{
	const struct region r = {(uintptr_t)ptr, len};

	return smm_region_overlaps_handler(&r);
}
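/*
 * Typical use inside an SMI handler: refuse buffers supplied by the OS that
 * would make SMM read or write SMRAM itself (sketch; buf and len are
 * whatever pointer/length pair the handler received):
 *
 *	if (smm_points_to_smram(buf, len))
 *		return;	// untrusted pointer aliases SMRAM; do not touch it
 */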
/* SMM Module Loading API */

/* The smm_loader_params structure provides direction to the SMM loader:
 * - num_cpus - number of concurrent cpus in handler needing stack
 *              optional for setting up relocation handler.
 * - cpu_save_state_size - the SMM save state size per cpu
 * - num_concurrent_save_states - number of concurrent cpus needing save state
 *                                space
 * - handler - optional handler to call. Only used during SMM relocation setup.
 * - runtime - this field is a result only. The SMM runtime location is filled
 *             into this field so the code doing the loading can manipulate the
 *             runtime's assumptions. e.g. updating the APIC id to CPU map to
 *             handle sparse APIC id space.
 */
struct smm_loader_params {
	size_t num_cpus;

	size_t cpu_save_state_size;
	size_t num_concurrent_save_states;

	smm_handler_t handler;
	uint32_t cr3;
};

/* All of these return 0 on success, < 0 on failure. */
int smm_setup_stack(const uintptr_t perm_smbase, const size_t perm_smram_size,
		    const unsigned int total_cpus, const size_t stack_size);
int smm_setup_relocation_handler(struct smm_loader_params *params);
int smm_load_module(uintptr_t smram_base, size_t smram_size, struct smm_loader_params *params);

u32 smm_get_cpu_smbase(unsigned int cpu_num);
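/*
 * Rough shape of a call into the loader from ramstage (a sketch only; the
 * real sequence lives in the MP init code, and read_cr3() stands in for
 * however the caller obtains the page-table root it wants SMM to reuse):
 *
 *	struct smm_loader_params p = {
 *		.num_cpus = num_cpus,
 *		.cpu_save_state_size = save_state_size,
 *		.num_concurrent_save_states = num_cpus,
 *		.handler = smi_handler,
 *		.cr3 = read_cr3(),
 *	};
 *	if (smm_load_module(smram_base, smram_size, &p) < 0)
 *		printk(BIOS_ERR, "SMM module loading failed\n");
 */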
/* Backup and restore default SMM region. */
void *backup_default_smm_area(void);
void restore_default_smm_area(void *smm_save_area);
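/*
 * Code that temporarily needs the default SMM area at 0x30000 brackets its
 * use of that memory (sketch):
 *
 *	void *backup = backup_default_smm_area();
 *	// ... use the low-memory default SMM region, e.g. for relocation ...
 *	restore_default_smm_area(backup);
 */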
/*
 * Fills in the arguments for the entire SMM region covered by chipset
 * protections. e.g. TSEG.
 */
void smm_region(uintptr_t *start, size_t *size);

static inline void aseg_region(uintptr_t *start, size_t *size)
{
	*start = SMM_BASE;
	*size = SMM_DEFAULT_SIZE; /* SMM_CODE_SEGMENT_SIZE ? */
}

enum {
	/* SMM handler area. */
	SMM_SUBREGION_HANDLER,
	/* SMM cache region. */
	SMM_SUBREGION_CACHE,
	/* Chipset specific area. */
	SMM_SUBREGION_CHIPSET,
	/* Total sub regions supported. */
	SMM_SUBREGION_NUM,
};

/* Fills in the start and size for the requested SMM subregion. Returns
 * 0 on success, < 0 on failure. */
int smm_subregion(int sub, uintptr_t *start, size_t *size);
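/*
 * Example query of the handler subregion within the chipset-protected
 * region (sketch):
 *
 *	uintptr_t base;
 *	size_t size;
 *	if (smm_subregion(SMM_SUBREGION_HANDLER, &base, &size) < 0)
 *		printk(BIOS_ERR, "No SMM handler subregion\n");
 */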
/* Print the SMM memory layout on console. */
void smm_list_regions(void);

#define SMM_REVISION_OFFSET_FROM_TOP (0x8000 - 0x7efc)
/* Return the SMM save state revision. The revision can be fetched from the SMM save
 * state; it is always at the same offset downward from the top of the save state. */
uint32_t smm_revision(void);
/* Returns the PM ACPI SMI command port. On Intel systems this is typically not
 * configurable (APM_CNT, 0xb2). On AMD systems it is sometimes configurable. */
uint16_t pm_acpi_smi_cmd_port(void);
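/*
 * This is the value that typically ends up in the FADT so the OS knows where
 * to write ACPI enable/disable commands; a sketch of such use in a platform's
 * FADT fill routine:
 *
 *	fadt->smi_cmd = pm_acpi_smi_cmd_port();
 *	fadt->acpi_enable = APM_CNT_ACPI_ENABLE;
 *	fadt->acpi_disable = APM_CNT_ACPI_DISABLE;
 */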
const volatile struct smm_pci_resource_info *smm_get_pci_resource_store(void);

void smm_pci_get_stored_resources(const volatile struct smm_pci_resource_info **out_slots,
				  size_t *out_size);
/* Weak handler function to store PCI BARs. */
void smm_mainboard_pci_resource_store_init(struct smm_pci_resource_info *slots, size_t size);
/* Helper function to fill BARs from an array of device pointers. */
bool smm_pci_resource_store_fill_resources(struct smm_pci_resource_info *slots, size_t num_slots,
					   const struct device **devices, size_t num_devices);

void smm_pci_resource_store_init(struct smm_runtime *smm_runtime);
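/*
 * An SMI handler can consult the BARs captured at boot rather than trusting
 * config space the OS may have reprogrammed; a sketch (the 0:14.0 device and
 * use_bar() helper are only examples):
 *
 *	const volatile struct smm_pci_resource_info *slots;
 *	size_t num;
 *	smm_pci_get_stored_resources(&slots, &num);
 *	for (size_t i = 0; i < num; i++) {
 *		if (slots[i].pci_addr == PCI_DEV(0, 0x14, 0))
 *			use_bar(slots[i].resources[0].base);
 *	}
 */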
#endif /* CPU_X86_SMM_H */