/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi_gnvs.h>
#include <cbmem.h>
#include <commonlib/helpers.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/mmio.h>
#include <rmodule.h>
#include <stdio.h>
#include <string.h>
#include <types.h>

#define SMM_CODE_SEGMENT_SIZE 0x10000
/*
 * Components that make up the SMRAM:
 * 1. Save state - the total save state memory used
 * 2. Stack - stacks for the CPUs in the SMM handler
 * 3. Stub - SMM stub code for calling into the handler
 * 4. Handler - C-based SMM handler
 *
 * The components are assumed to occupy a single contiguous region.
 */
/*
 * The stub is the entry point that sets up protected mode and stacks for each
 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
 */
extern unsigned char _binary_smmstub_start[];
/* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32

struct cpu_smm_info {
	uint8_t active;
	uintptr_t smbase;
	struct region ss;
	struct region stub_code;
};
struct cpu_smm_info cpus[CONFIG_MAX_CPUS] = { 0 };
/*
 * This method creates a map of all the CPU entry points, save state locations
 * and the beginning and end of code segments for each CPU. This map is used
 * during relocation to properly align as many CPUs as can fit into the SMRAM
 * region. For more information on how SMRAM works, refer to the latest Intel
 * developer's manuals (volume 3, chapter 34). SMRAM is divided up into the
 * following regions:
 * +-----------------+ Top of SMRAM
 * |       ...       |
 * +-----------------+ <- START of SMRAM
 *
 * The code below checks when a code segment is full and begins placing the
 * remaining CPUs in the lower segments. The entry point for each CPU is
 * smbase + 0x8000 and its save state is at smbase + 0x8000 + (0x8000 - state
 * save size). The save state area grows downward into the CPU's entry point.
 * Therefore staggering too many CPUs in one 32K block will corrupt CPU0's
 * entry code as the save states move downward.
 * input : smbase of first CPU (all other CPUs will go below this address)
 * input : num_cpus in the system. The map will be created from 0 to num_cpus.
 */
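/*
 * Worked example with illustrative sizes (not taken from any particular
 * platform): with a 0x680-byte save state and a ~0x700-byte stub,
 * needed_ss_size = MAX(0x680, 0x700) = 0x700, so one 64K segment fits
 * (0x10000 - 0x8000 - 0x700) / 0x700 = 17 staggered CPUs before the save
 * states would reach down into the stub code.
 */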
static int smm_create_map(const uintptr_t smbase, const unsigned int num_cpus,
			  const struct smm_loader_params *params)
{
	struct rmodule smm_stub;

	if (ARRAY_SIZE(cpus) < num_cpus) {
		printk(BIOS_ERR, "%s: increase MAX_CPUS in Kconfig\n", __func__);
		return 0;
	}

	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to get SMM module size\n", __func__);
		return 0;
	}
	/*
	 * How many CPUs can fit into one 64K segment?
	 * Make sure that the first stub does not overlap with the last save state of a
	 * segment.
	 */
	const size_t stub_size = rmodule_memory_size(&smm_stub);
	const size_t needed_ss_size = MAX(params->cpu_save_state_size, stub_size);
	const size_t cpus_per_segment =
		(SMM_CODE_SEGMENT_SIZE - SMM_ENTRY_OFFSET - stub_size) / needed_ss_size;
	if (cpus_per_segment == 0) {
		printk(BIOS_ERR, "%s: CPUs won't fit in segment. Broken stub or save state size\n",
		       __func__);
		return 0;
	}
	for (unsigned int i = 0; i < num_cpus; i++) {
		const size_t segment_number = i / cpus_per_segment;
		cpus[i].smbase = smbase - SMM_CODE_SEGMENT_SIZE * segment_number
			- needed_ss_size * (i % cpus_per_segment);
		cpus[i].stub_code = region_create(cpus[i].smbase + SMM_ENTRY_OFFSET, stub_size);
		cpus[i].ss = region_create(
			cpus[i].smbase + SMM_CODE_SEGMENT_SIZE - params->cpu_save_state_size,
			params->cpu_save_state_size);
		cpus[i].active = 1;
	}

	return 1;
}
/*
 * This method expects the SMM relocation map to be complete.
 * This method does not read any HW registers; it simply uses a
 * map that was created during SMM setup.
 * input: cpu_num - cpu number which is used as an index into the
 *        map to return the smbase
 */
u32 smm_get_cpu_smbase(unsigned int cpu_num)
{
	if (cpu_num < CONFIG_MAX_CPUS) {
		if (cpus[cpu_num].active)
			return cpus[cpu_num].smbase;
	}

	return 0;
}
/*
 * This method assumes that at least one CPU has been set up, from
 * which it will place other CPUs below its smbase, ensuring that the
 * save state does not clobber the first CPU's init code segment. The init
 * code, which is the SMM stub code, is the same for all CPUs. They enter
 * SMM, set up stacks (based on their APIC id), enter protected mode
 * and then jump to the common SMI handler. The stack is allocated
 * at the beginning of SMRAM (aka TSEG base, not smbase). The stack
 * pointer for each CPU is calculated by using its APIC id
 * (code is in smm_stub.s).
 * Each entry point will now have the same stub code which sets up the CPU
 * stack, enters protected mode and then jumps to the SMI handler. It is
 * important to enter protected mode before the jump because the "jump to
 * address" might be larger than the 20-bit address supported by real mode.
 * SMI entry right now is in real mode.
 * input: num_cpus - number of CPUs that need relocation, including
 *        the first CPU (though its code is already loaded)
 */
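/*
 * Sketch of the per-CPU stack math done by the stub (see smm_stub.s; this is
 * an illustration, not the literal assembly): the stub resolves its index via
 * apic_id_to_cpu[] and then uses
 *	sp = stack_top - cpu_index * stack_size
 * so each CPU gets a disjoint stack carved out of the region below stack_top.
 */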
static void smm_place_entry_code(const unsigned int num_cpus)
{
	unsigned int i;
	size_t size;

	/* Start at 1; the first CPU's stub code is already in place. */
	size = region_sz(&cpus[0].stub_code);
	for (i = 1; i < num_cpus; i++) {
		printk(BIOS_DEBUG,
		       "SMM Module: placing smm entry code at %zx, cpu # 0x%x\n",
		       region_offset(&cpus[i].stub_code), i);
		memcpy((void *)region_offset(&cpus[i].stub_code),
		       (void *)region_offset(&cpus[0].stub_code), size);
		printk(BIOS_SPEW, "%s: copying from %zx to %zx 0x%zx bytes\n",
		       __func__, region_offset(&cpus[0].stub_code),
		       region_offset(&cpus[i].stub_code), size);
	}
}
static uintptr_t stack_top;
static size_t g_stack_size;

int smm_setup_stack(const uintptr_t perm_smbase, const size_t perm_smram_size,
		    const unsigned int total_cpus, const size_t stack_size)
{
	/* Need a minimum stack size and alignment. */
	if (stack_size <= SMM_MINIMUM_STACK_SIZE || (stack_size & 3) != 0) {
		printk(BIOS_ERR, "%s: need minimum stack size\n", __func__);
		return -1;
	}

	const size_t total_stack_size = total_cpus * stack_size;
	if (total_stack_size >= perm_smram_size) {
		printk(BIOS_ERR, "%s: Stack won't fit smram\n", __func__);
		return -1;
	}

	stack_top = perm_smbase + total_stack_size;
	g_stack_size = stack_size;

	return 0;
}
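/*
 * Worked example (illustrative numbers only): total_cpus = 4 and
 * stack_size = 0x800 give total_stack_size = 0x2000, so the stacks occupy
 * [perm_smbase, perm_smbase + 0x2000) and stack_top ends up at
 * perm_smbase + 0x2000.
 */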
/*
 * Place the staggered entry points for each CPU. The entry points are
 * staggered by the per-CPU SMM save state size, extending down from
 * SMM_ENTRY_OFFSET.
 */
static void smm_stub_place_staggered_entry_points(const struct smm_loader_params *params)
{
	if (params->num_concurrent_save_states > 1)
		smm_place_entry_code(params->num_concurrent_save_states);
}
/*
 * The stub setup code assumes it is completely contained within the
 * default SMRAM size (0x10000) for the default SMI handler (entry at
 * 0x30000), but no assumption should be made for the permanent SMI handler.
 * The placement of CPU entry points for the permanent handler is determined
 * by the number of CPUs in the system and the amount of SMRAM.
 * There are potentially two regions to place
 * within the default SMRAM size:
 * 1. Save state areas
 * 2. Stub code
 *
 * The save state always lives at the top of the CPU's smbase (and the entry
 * point is at offset 0x8000). This allows only a certain number of CPUs with
 * staggered entry points until the save state area comes down far enough to
 * overwrite/corrupt the entry code (stub code). Therefore, an SMM map is
 * created to avoid this corruption; see smm_create_map() above.
 * This module setup code works for the default (0x30000) SMM handler setup and
 * the permanent SMM handler.
 * The CPU stack is decided at runtime in the stub and is treated as a
 * contiguous region. As this might not fit the default SMRAM region, the same
 * region used by the permanent handler can be used during relocation.
 */
static int smm_module_setup_stub(const uintptr_t smbase, const size_t smm_size,
				 struct smm_loader_params *params)
{
	struct rmodule smm_stub;
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub)) {
		printk(BIOS_ERR, "%s: unable to parse smm stub\n", __func__);
		return -1;
	}

	const size_t stub_size = rmodule_memory_size(&smm_stub);

	/* Sanity-check that the stub fits below the entry point offset. */
	if (stub_size >= SMM_ENTRY_OFFSET) {
		printk(BIOS_ERR, "%s: Stub too large\n", __func__);
		return -1;
	}

	const uintptr_t smm_stub_loc = smbase + SMM_ENTRY_OFFSET;
	if (rmodule_load((void *)smm_stub_loc, &smm_stub)) {
		printk(BIOS_ERR, "%s: load module failed\n", __func__);
		return -1;
	}

	struct smm_stub_params *stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = stack_top;
	stub_params->stack_size = g_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;
	stub_params->cr3 = params->cr3;
	/* This runs on the BSP. All the APs are its siblings. */
	struct cpu_info *info = cpu_info();
	if (!info || !info->cpu) {
		printk(BIOS_ERR, "%s: Failed to find BSP struct device\n", __func__);
		return -1;
	}

	size_t i = 0;
	for (struct device *dev = info->cpu; dev; dev = dev->sibling)
		if (dev->enabled)
			stub_params->apic_id_to_cpu[i++] = dev->path.apic.initial_lapicid;

	if (i != params->num_cpus) {
		printk(BIOS_ERR, "%s: Failed to set up apic map correctly\n", __func__);
		return -1;
	}
	printk(BIOS_DEBUG, "%s: stack_top = 0x%x\n", __func__, stub_params->stack_top);
	printk(BIOS_DEBUG, "%s: per cpu stack_size = 0x%x\n", __func__,
	       stub_params->stack_size);
	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n", __func__, smm_size);

	smm_stub_place_staggered_entry_points(params);

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %lx. Will call %p\n", smm_stub_loc,
	       params->handler);

	return 0;
}
/*
 * smm_setup_relocation_handler assumes the callback is already loaded in
 * memory, i.e. no other SMM module is chained to the stub. The other
 * assumption is that the stub will be entered from the default SMRAM
 * location: 0x30000 -> 0x40000.
 */
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
	uintptr_t smram = SMM_DEFAULT_BASE;
	printk(BIOS_SPEW, "%s: enter\n", __func__);

	/* There can't be more than 1 concurrent save state for the relocation
	 * handler because all CPUs default to 0x30000 as SMBASE. */
	if (params->num_concurrent_save_states > 1)
		return -1;

	/* A handler has to be defined to call for relocation. */
	if (params->handler == NULL)
		return -1;

	/* Since the relocation handler always uses stack, adjust the number
	 * of concurrent stack users to be CONFIG_MAX_CPUS. */
	if (params->num_cpus == 0)
		params->num_cpus = CONFIG_MAX_CPUS;

	printk(BIOS_SPEW, "%s: exit\n", __func__);
	return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE, params);
}
static void setup_smihandler_params(struct smm_runtime *mod_params,
				    struct smm_loader_params *loader_params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);
	mod_params->smbase = tseg_base;
	mod_params->smm_size = tseg_size;
	mod_params->save_state_size = loader_params->cpu_save_state_size;
	mod_params->num_cpus = loader_params->num_cpus;
	mod_params->gnvs_ptr = (uint32_t)(uintptr_t)acpi_get_gnvs();
	const struct cbmem_entry *cbmemc;
	if (CONFIG(CONSOLE_CBMEM) && (cbmemc = cbmem_entry_find(CBMEM_ID_CONSOLE))) {
		mod_params->cbmemc = cbmem_entry_start(cbmemc);
		mod_params->cbmemc_size = cbmem_entry_size(cbmemc);
	} else {
		mod_params->cbmemc = 0;
		mod_params->cbmemc_size = 0;
	}
	for (int i = 0; i < loader_params->num_cpus; i++)
		mod_params->save_state_top[i] = region_last(&cpus[i].ss) + 1;

	if (CONFIG(RUNTIME_CONFIGURABLE_SMM_LOGLEVEL))
		mod_params->smm_log_level = mainboard_set_smm_log_level();
	else
		mod_params->smm_log_level = 0;
	if (CONFIG(SMM_PCI_RESOURCE_STORE))
		smm_pci_resource_store_init(mod_params);
	if (CONFIG(SMMSTORE_V2)) {
		struct smmstore_params_info info;
		if (smmstore_get_info(&info) < 0) {
			printk(BIOS_INFO, "SMMSTORE: Failed to get meta data\n");
			return;
		}

		void *ptr = cbmem_add(CBMEM_ID_SMM_COMBUFFER, info.block_size);
		if (!ptr) {
			printk(BIOS_ERR, "SMMSTORE: Failed to add com buffer\n");
			return;
		}

		mod_params->smmstore_com_buffer_base = (uintptr_t)ptr;
		mod_params->smmstore_com_buffer_size = info.block_size;
	}
}
static void print_region(const char *name, const struct region region)
{
	printk(BIOS_DEBUG, "%-12s [0x%zx-0x%zx]\n", name, region_offset(&region),
	       region_last(&region));
}
/* STM + Handler + (Stub + Save state) * CONFIG_MAX_CPUS + stacks + page tables */
#define SMM_REGIONS_ARRAY_SIZE (1 + 1 + CONFIG_MAX_CPUS * 2 + 1 + 1)
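/*
 * For example (illustrative only): with CONFIG_MAX_CPUS = 16 the list holds
 * 1 + 1 + 16 * 2 + 1 + 1 = 36 region slots.
 */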
static int append_and_check_region(const struct region smram,
				   const struct region region,
				   struct region *region_list,
				   const char *name)
{
	unsigned int region_counter = 0;
	for (; region_counter < SMM_REGIONS_ARRAY_SIZE; region_counter++)
		if (region_sz(&region_list[region_counter]) == 0)
			break;

	if (region_counter >= SMM_REGIONS_ARRAY_SIZE) {
		printk(BIOS_ERR, "Array used to check regions too small\n");
		return 1;
	}
	if (!region_is_subregion(&smram, &region)) {
		printk(BIOS_ERR, "%s not in SMM\n", name);
		return 1;
	}

	print_region(name, region);
	for (unsigned int i = 0; i < region_counter; i++) {
		if (region_overlap(&region_list[i], &region)) {
			printk(BIOS_ERR, "%s overlaps with a previous region\n", name);
			return 1;
		}
	}

	region_list[region_counter] = region;

	return 0;
}
#define _PRES (1ULL << 0)
#define _RW   (1ULL << 1)
#define _US   (1ULL << 2)
#define _A    (1ULL << 5)
#define _D    (1ULL << 6)
#define _PS   (1ULL << 7)
#define _GEN_DIR(a) (_PRES + _RW + _US + _A + (a))
#define _GEN_PAGE(a) (_PRES + _RW + _US + _PS + _A + _D + (a))
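/*
 * For illustration only: _GEN_PAGE(1ull * GiB) expands to 0x40000000 plus
 * present | writable | user | page-size | accessed | dirty, i.e. 0x400000E7,
 * a leaf entry mapping the second gigabyte.
 */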
/* Returns the PM4LE, which is used as the CR3 value. */
static uintptr_t install_page_table(const uintptr_t handler_base)
{
	const bool one_g_pages = !!(cpuid_edx(0x80000001) & (1 << 26));
	/* 4 1G pages or 4 PDPE entries with 512 * 2M pages */
	const size_t pages_needed = one_g_pages ? 4 : 2048 + 4;
	const uintptr_t pages_base = ALIGN_DOWN(handler_base - pages_needed * PAGE_SIZE, 4096);
	const uintptr_t pm4le = ALIGN_DOWN(pages_base - 8, 4096);
	if (one_g_pages) {
		/* Map the low 4 GiB with four 1 GiB leaf entries. */
		for (size_t i = 0; i < 4; i++)
			write64p(pages_base + i * PAGE_SIZE, _GEN_PAGE(1ull * GiB * i));
		write64p(pm4le, _GEN_DIR(pages_base));
	} else {
		/* Map the low 4 GiB with 2048 2 MiB leaf entries behind four
		 * directory entries. */
		for (size_t i = 0; i < 2048; i++)
			write64p(pages_base + i * PAGE_SIZE, _GEN_PAGE(2ull * MiB * i));
		write64p(pm4le, _GEN_DIR(pages_base + 2048 * PAGE_SIZE));
		for (size_t i = 0; i < 4; i++)
			write64p(pages_base + (2048 + i) * PAGE_SIZE,
				 _GEN_DIR(pages_base + 4096 * i));
	}

	return pm4le;
}
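/*
 * Design note: both branches build an identity map of the low 4 GiB,
 * differing only in leaf page size, so the 64-bit SMM handler can run with
 * paging enabled regardless of 1 GiB page support. The returned PM4LE is
 * what smm_load_module() later programs into params->cr3.
 */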
/*
 * The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size
 * |       ...       |
 * +-----------------+
 * |       ...       |
 * +-----------------+
 * |       ...       |
 * +-----------------+ <- cpu0
 * |    stub code    | <- cpu1
 * |    stub code    | <- cpu2
 * |    stub code    | <- cpu3, etc
 * |       ...       |
 * +-----------------+ <- smram start
 *
 * With CONFIG(SMM_TSEG) the stubs will be placed in the same segment as the
 * permanent handler and the stacks.
 */
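/*
 * Illustrative call sequence (hypothetical values; the actual caller lives
 * in the MP init path):
 *
 *	struct smm_loader_params params = {
 *		.num_cpus = 4,
 *		.num_concurrent_save_states = 4,
 *		.cpu_save_state_size = 0x680,
 *	};
 *	if (smm_load_module(tseg_base, tseg_size, &params))
 *		die("SMM load failed");
 *	// params.handler now points at the permanent handler's entry.
 */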
int smm_load_module(const uintptr_t smram_base, const size_t smram_size,
		    struct smm_loader_params *params)
{
	/*
	 * Place in .bss to reduce stack usage.
	 * TODO: once CPU_INFO_V2 is used everywhere, use a smaller stack for APs
	 * and move this back to the BSP stack.
	 */
	static struct region region_list[SMM_REGIONS_ARRAY_SIZE] = {};
	struct rmodule smi_handler;
	if (rmodule_parse(&_binary_smm_start, &smi_handler))
		return -1;

	const struct region smram = region_create(smram_base, smram_size);
	const uintptr_t smram_top = region_last(&smram) + 1;
	const size_t stm_size =
		CONFIG(STM) ? CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE : 0;

	if (stm_size) {
		struct region stm = region_create(smram_top - stm_size, stm_size);
		if (append_and_check_region(smram, stm, region_list, "STM"))
			return -1;
		printk(BIOS_DEBUG, "MSEG size 0x%x\n", CONFIG_MSEG_SIZE);
		printk(BIOS_DEBUG, "BIOS res list 0x%x\n", CONFIG_BIOS_RESOURCE_LIST_SIZE);
	}
	const size_t handler_size = rmodule_memory_size(&smi_handler);
	const size_t handler_alignment = rmodule_load_alignment(&smi_handler);
	const uintptr_t handler_base =
		ALIGN_DOWN(smram_top - stm_size - handler_size,
			   handler_alignment);
	struct region handler = region_create(handler_base, handler_size);
	if (append_and_check_region(smram, handler, region_list, "HANDLER"))
		return -1;
	uintptr_t stub_segment_base;
	if (ENV_X86_64) {
		uintptr_t pt_base = install_page_table(handler_base);
		struct region page_tables = region_create(pt_base, handler_base - pt_base);
		if (append_and_check_region(smram, page_tables, region_list, "PAGE TABLES"))
			return -1;
		params->cr3 = pt_base;
		stub_segment_base = pt_base - SMM_CODE_SEGMENT_SIZE;
	} else {
		stub_segment_base = handler_base - SMM_CODE_SEGMENT_SIZE;
	}
	if (!smm_create_map(stub_segment_base, params->num_concurrent_save_states, params)) {
		printk(BIOS_ERR, "%s: Error creating CPU map\n", __func__);
		return -1;
	}
	for (unsigned int i = 0; i < params->num_concurrent_save_states; i++) {
		printk(BIOS_DEBUG, "\nCPU %u\n", i);

		char string[13];
		snprintf(string, sizeof(string), " ss%d", i);
		if (append_and_check_region(smram, cpus[i].ss, region_list, string))
			return -1;
		snprintf(string, sizeof(string), " stub%d", i);
		if (append_and_check_region(smram, cpus[i].stub_code, region_list, string))
			return -1;
	}
	struct region stacks = region_create(smram_base,
		params->num_concurrent_save_states * CONFIG_SMM_MODULE_STACK_SIZE);
	printk(BIOS_DEBUG, "\n");
	if (append_and_check_region(smram, stacks, region_list, "stacks"))
		return -1;
	if (rmodule_load((void *)handler_base, &smi_handler))
		return -1;

	struct smm_runtime *smihandler_params = rmodule_parameters(&smi_handler);
	params->handler = rmodule_entry(&smi_handler);
	setup_smihandler_params(smihandler_params, params);

	return smm_module_setup_stub(stub_segment_base, smram_size, params);
}