/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ops.h>
#include <smp/node.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/soc_chip.h>
#include <string.h>
#include <types.h>

static void update_save_state(int cpu, uintptr_t curr_smbase,
			      uintptr_t staggered_smbase,
			      struct smm_relocation_params *relo_params)
{
	u32 smbase;
	u32 iedbase;

	/*
	 * The relocated handler runs with all CPUs concurrently. Therefore
	 * stagger the entry points adjusting SMBASE downwards by save state
	 * size * CPU num.
	 */
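	/*
	 * Illustrative example (values assumed, not taken from this code):
	 * with a save state size of 0x400, CPU 0 would enter at SMBASE,
	 * CPU 1 at SMBASE - 0x400, CPU 2 at SMBASE - 0x800, and so on,
	 * so that each thread ends up with a private save state area.
	 */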
	smbase = staggered_smbase;
	iedbase = relo_params->ied_base;

	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
	       smbase, iedbase);

	/*
	 * All threads need to set IEDBASE and SMBASE to the relocated
	 * handler region. However, the save state location depends on the
	 * smm_save_state_in_msrs field in the relocation parameters. If
	 * smm_save_state_in_msrs is non-zero then the CPUs are relocating
	 * the SMM handler in parallel, and each CPU's save state area is
	 * located in their respective MSR space. If smm_save_state_in_msrs
	 * is zero then the SMM relocation is happening serially so the
	 * save state is at the same default location for all CPUs.
	 */
	if (relo_params->smm_save_state_in_msrs) {
		msr_t smbase_msr;
		msr_t iedbase_msr;

		smbase_msr.lo = smbase;
		smbase_msr.hi = 0;

		/*
		 * According to the BWG, the IEDBASE MSR is in bits 63:32. It's
		 * not clear why it differs from the SMBASE MSR.
		 */
		iedbase_msr.lo = 0;
		iedbase_msr.hi = iedbase;
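
		/* The value that reaches the MSR is therefore (u64)iedbase << 32. */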

		wrmsr(SMBASE_MSR, smbase_msr);
		wrmsr(IEDBASE_MSR, iedbase_msr);
	} else {
		em64t101_smm_state_save_area_t *save_state;

		save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
				      sizeof(*save_state));
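
		/*
		 * The save state map sits at the top of the 64KiB default
		 * SMRAM segment; with the default SMBASE of 0x30000 it
		 * ends at 0x40000.
		 */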
		save_state->smbase = smbase;
		save_state->iedbase = iedbase;
	}
}

/* Returns 1 if SMM MSR save state was set. */
static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
{
	msr_t smm_mca_cap;

	smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
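
	/*
	 * The save-state-in-MSRs capability bit (SMM_CPU_SVRSTR, bit 58 of
	 * the SMM_MCA_CAP MSR) lives in the upper 32 bits, hence the check
	 * against the .hi half below.
	 */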
	if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
		msr_t smm_feature_control;

		smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
		smm_feature_control.hi = 0;
		smm_feature_control.lo |= SMM_CPU_SAVE_EN;
		wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
		relo_params->smm_save_state_in_msrs = 1;
	}
	return relo_params->smm_save_state_in_msrs;
}

/*
 * The relocation work is actually performed in SMM context, but the code
 * resides in the ramstage module. This occurs by trampolining from the default
 * SMRAM entry point to here.
 */
void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
			    uintptr_t staggered_smbase)
{
	msr_t mtrr_cap;
	struct smm_relocation_params *relo_params = &smm_reloc_params;

	printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);

	/*
	 * Determine if the processor supports saving state in MSRs. If so,
	 * enable it before the non-BSPs run so that SMM relocation can occur
	 * in parallel in the non-BSP CPUs.
	 */
	if (cpu == 0) {
		/*
		 * If smm_save_state_in_msrs is 1 then that means this is the
		 * 2nd time through the relocation handler for the BSP.
		 * Parallel SMM handler relocation is taking place. However,
		 * it is desired to access other CPUs' save state in the real
		 * SMM handler. Therefore, disable the SMM save state in MSRs
		 * feature.
		 */
		if (relo_params->smm_save_state_in_msrs) {
			msr_t smm_feature_control;

			smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
			smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
			wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
		} else if (bsp_setup_msr_save_state(relo_params))
			/*
			 * Just return from the relocation handler if MSR save
			 * state is enabled. In that case the BSP will come
			 * back into the relocation handler to set up the new
			 * SMBASE as well as disabling SMM save state in MSRs.
			 */
			return;
	}

	/* Make appropriate changes to the save state map. */
	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);

	/*
	 * The SMRR MSRs are core-level registers, so if two threads that share
	 * a core try to both set the lock bit (in the same physical register),
	 * a #GP will be raised on the second write to that register (which is
	 * exactly what the lock is supposed to do), therefore secondary threads
	 * should exit here.
	 */
	if (intel_ht_sibling())
		return;

	/* Write SMRR MSRs based on indicated support. */
	mtrr_cap = rdmsr(MTRR_CAP_MSR);

	/* Set Lock bit if supported */
	if (mtrr_cap.lo & SMRR_LOCK_SUPPORTED)
		relo_params->smrr_mask.lo |= SMRR_PHYS_MASK_LOCK;
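
	/*
	 * With the lock bit included in the mask, the SMRR write below is
	 * the last one the core will accept; later writes raise #GP, as
	 * noted in the comment above.
	 */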

	/* Write SMRRs if supported */
	if (mtrr_cap.lo & SMRR_SUPPORTED)
		write_smrr(relo_params);
}

static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	/* All range registers are aligned to 4KiB */
	const u32 rmask = ~(4 * KiB - 1);

	smm_region(&tseg_base, &tseg_size);

	if (!IS_ALIGNED(tseg_base, tseg_size)) {
		printk(BIOS_WARNING,
		       "TSEG base not aligned with TSEG size! Not setting SMRR\n");
		return;
	}

	smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);

	/* SMRR has 32-bits of valid address aligned to 4KiB. */
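	/*
	 * Worked example with assumed values: for tseg_base = 0x7f800000 and
	 * tseg_size = 8 MiB, smrr_base.lo becomes 0x7f800006 (base |
	 * MTRR_TYPE_WRBACK) and smrr_mask.lo becomes 0xff800800 (mask |
	 * MTRR_PHYS_MASK_VALID).
	 */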
	params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	params->smrr_base.hi = 0;
	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
	params->smrr_mask.hi = 0;
}

static void setup_ied_area(struct smm_relocation_params *params)
{
	char *ied_base;

	struct ied_header ied = {
		.signature = "INTEL RSVD",
		.size = params->ied_size,
		.reserved = {0},
	};

	ied_base = (void *)params->ied_base;

	printk(BIOS_DEBUG, "IED base = 0x%08x\n", (u32)params->ied_base);
	printk(BIOS_DEBUG, "IED size = 0x%08x\n", (u32)params->ied_size);

	/* Place IED header at IEDBASE. */
	memcpy(ied_base, &ied, sizeof(ied));

	/* Zero out 32KiB at IEDBASE + 1MiB */
	memset(ied_base + 1 * MiB, 0, 32 * KiB);
}

void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
	      size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);

	if (smm_reloc_params.ied_size)
		setup_ied_area(&smm_reloc_params);

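	/*
	 * The reported save state size lets the SMM module loader reserve
	 * enough room in SMRAM for every CPU's save state area.
	 */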
	*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
}

void smm_initialize(void)
{
	/* Clear the SMM state in the southbridge. */
	smm_southbridge_clear_state();

	/*
	 * Run the relocation handler only on the BSP to check and set up
	 * parallel SMM relocation.
	 */
	smm_initiate_relocation();

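	/*
	 * If the BSP enabled MSR-based save states above, its first pass
	 * through the relocation handler returned early; the actual SMBASE
	 * moves are then performed in smm_relocate().
	 */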
	if (smm_reloc_params.smm_save_state_in_msrs)
		printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
}

void smm_relocate(void)
{
	/*
	 * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
	 * shall take place. Run the relocation handler a second time on the
	 * BSP to do the final move. For APs, a relocation handler always
	 * needs to be run.
	 */
	if (smm_reloc_params.smm_save_state_in_msrs)
		smm_initiate_relocation_parallel();
	else if (!boot_cpu())
		smm_initiate_relocation();
}