/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <reg_script.h>
#include <soc/iosf.h>
#include <soc/msr.h>
#include <soc/pattrs.h>
#include <soc/ramstage.h>
#include <soc/smm.h>

/* Core level MSRs */
static const struct reg_script core_msr_script[] = {
	/* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */
	REG_MSR_RMW(MSR_PKG_CST_CONFIG_CONTROL, ~0x3f080f, 0xe0008),
	REG_MSR_RMW(MSR_POWER_MISC, ~(ENABLE_ULFM_AUTOCM_MASK | ENABLE_INDP_AUTOCM_MASK), 0),

	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
	REG_MSR_OR(MSR_POWER_MISC, 0x44),
	REG_SCRIPT_END
};

static void soc_core_init(struct device *cpu)
{
	printk(BIOS_DEBUG, "Init Braswell core.\n");

	/*
	 * The turbo disable bit is actually scoped at building block level -- not package.
	 * For non-BSP cores that are within a building block, enable turbo. The cores within
	 * the BSP's building block will just see it already enabled and move on.
	 */
	if (lapicid())
		enable_turbo();

	/* Set virtualization based on Kconfig option */
	set_vmx_and_lock();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Set this core to max frequency ratio */
	set_max_freq();
}

static struct device_operations cpu_dev_ops = {
	.init = soc_core_init,
};

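/* Braswell (Airmont) CPUIDs: family 6, model 0x4C, steppings 2 through 4. */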
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x406c4, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x406c3, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x406c2, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};

/*
 * MP and SMM loading initialization.
 */

/* Package level MSRs */
static const struct reg_script package_msr_script[] = {
	/* Set Package TDP to ~7W */
	REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa),
	REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0),
	REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d),
	REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27),
	REG_SCRIPT_END
};

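/*
 * pre_mp_init() runs once on the BSP before the APs are brought up, so the
 * package-scope setup below (MTRRs, BUNIT SMM write access, package MSRs)
 * is only performed a single time.
 */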
static void pre_mp_init(void)
{
	uint32_t bsmrwac;

	/* Set up MTRRs based on physical address size. */
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();

	/*
	 * Configure the BUNIT to allow dirty cache line evictions in non-SMM mode for lines
	 * that were dirtied while in SMM mode. Otherwise the writes would be silently dropped.
	 */
	bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
	iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);

	/* Set package MSRs */
	reg_script_run(package_msr_script);

	/* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
	enable_turbo();
}

static int get_cpu_count(void)
{
	const struct pattrs *pattrs = pattrs_get();

	return pattrs->num_cpus;
}

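/*
 * SMRR uses an MTRR-style base/mask pair, so it can only describe a naturally
 * aligned, power-of-two sized region; the TSEG region returned by smm_region()
 * is expected to satisfy that.
 */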
static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	/* All range registers are aligned to 4KiB */
	const u32 rmask = ~((1 << 12) - 1);

	smm_region(&tseg_base, &tseg_size);

	/* SMRR has 32-bits of valid address aligned to 4KiB. */
	params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	params->smrr_base.hi = 0;
	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
	params->smrr_mask.hi = 0;
}

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);

	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}

static void get_microcode_info(const void **microcode, int *parallel)
{
	const struct pattrs *pattrs = pattrs_get();

	*microcode = pattrs->microcode_patch;
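	/*
	 * Thread siblings share microcode update resources, so loading on all
	 * APs in parallel is only safe when hyper-threading is not supported.
	 */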
	*parallel = !intel_ht_supported();
}

static void per_cpu_smm_trigger(void)
{
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr_value;

	/* Need to make sure that all cores have microcode loaded. */
	msr_value = rdmsr(IA32_BIOS_SIGN_ID);
	if (msr_value.hi == 0)
		intel_microcode_load_unlocked(pattrs->microcode_patch);

	/* Relocate SMM space. */
	smm_initiate_relocation();

	/* Load microcode after SMM relocation. */
	intel_microcode_load_unlocked(pattrs->microcode_patch);
}

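/*
 * Runs in SMM on each core during the relocation SMI: program SMRR and point
 * the save state's SMBASE at this core's staggered SMM entry area.
 */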
static void relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
{
	struct smm_relocation_params *relo_params = &smm_reloc_params;
	em64t100_smm_state_save_area_t *smm_state;

	/* Set up SMRR. */
	wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base);
	wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask);

	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}

static void post_mp_init(void)
{
	/* Assumption: SMIs are turned back on here via the SoC's global_smi_enable() once relocation is done. */
	global_smi_enable();
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

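/*
 * Ramstage entry point for CPU init: mp_init_with_smm() starts the APs on
 * cpu_bus and drives SMM relocation through the mp_ops callbacks above.
 */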
void mp_init_cpus(struct bus *cpu_bus)
{
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}