src/soc/intel/common/block/cpu/mp_init.c

/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <bootstate.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/microcode.h>
#include <intelblocks/cfg.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <soc/cpu.h>
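
/* Find the microcode update in CBFS and load it on the CPU running this code. */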
static void initialize_microcode(void)
{
	const void *microcode_patch = intel_microcode_find();
	intel_microcode_load_unlocked(microcode_patch);
}
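
/* Per-CPU .init hook: run the SoC's core setup, then load microcode on this CPU. */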
static void init_one_cpu(struct device *dev)
{
	soc_core_init(dev);

	initialize_microcode();
}
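
/* Device operations attached to every CPU device matched by this driver. */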
static struct device_operations cpu_dev_ops = {
	.init = init_one_cpu,
};
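
/*
 * CPUID signatures (exact family/model/stepping matches) of the Intel SoC
 * generations supported by the common CPU block.
 */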
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, CPUID_PANTHERLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_LUNARLAKE_A0_1, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_LUNARLAKE_A0_2, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_A0_1, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_A0_2, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_METEORLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_D0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_HQ0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_SKYLAKE_HR0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_G0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_H0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_Y0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_HA0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_KABYLAKE_HB0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_CANNONLAKE_D0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_WHISKEYLAKE_V0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_WHISKEYLAKE_W0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_U0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_P0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COFFEELAKE_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_U_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_U_K0_S0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_6_2_G0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_6_2_G1, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_10_2_P0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_10_2_P1, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_COMETLAKE_H_S_10_2_Q0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_TIGERLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_TIGERLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_TIGERLAKE_P0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_TIGERLAKE_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ELKHARTLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ELKHARTLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_JASPERLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_G0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_H0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_J0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_K0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_L0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_Q0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_R0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_ALDERLAKE_N_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_J0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_Q0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_C0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_RAPTORLAKE_H0, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};
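
/* Register this driver so it binds to any CPU whose CPUID appears in cpu_table. */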
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};

/*
 * MP init callback to find the CPU topology. This function is common to all
 * SoCs and therefore lives in the common CPU block.
 */
int get_cpu_count(void)
{
	unsigned int num_virt_cores, num_phys_cores;

	cpu_read_topology(&num_phys_cores, &num_virt_cores);

	printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n",
	       num_phys_cores, num_virt_cores);

	return num_virt_cores;
}

/*
 * MP init callback (get_microcode_info) to find the microcode during the
 * pre-MP-init phase. This function is common to all SoCs and therefore lives
 * in the common CPU block.
 * It fills in the microcode patch (in *microcode) and sets *parallel to 1,
 * which allows microcode loading on all APs to occur in parallel during MP init.
 */
void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 1;
}

/*
 * Perform BSP and AP initialization.
 * This function can be called in the following cases:
 * 1. coreboot is doing MP initialization as part of BS_DEV_INIT_CHIPS (this
 *    call is excluded if the user has selected USE_INTEL_FSP_MP_INIT).
 * 2. coreboot wants to take back control of the APs after FSP-S has finished
 *    MP initialization, based on the user selecting USE_INTEL_FSP_MP_INIT.
 *
 * This function uses the cpu_cluster as a device and the APIC devices as a
 * linked list under that cluster. If the mainboard does not hardcode a LAPIC
 * ID in the devicetree, it adds a node and fills it with the actual BSP APIC
 * ID, which lets coreboot detect the BSP's LAPIC ID dynamically.
 * If the mainboard does define an APIC ID in the devicetree, a link is already
 * present, creation of the new node is skipped, and that node keeps the APIC
 * ID from the devicetree.
 */
void init_cpus(void)
{
	struct device *dev = dev_find_path(NULL, DEVICE_PATH_CPU_CLUSTER);
	assert(dev != NULL);

	mp_cpu_bus_init(dev);
}
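
/*
 * BS_DEV_INIT_CHIPS entry hook: when coreboot (rather than FSP) owns MP init,
 * load microcode on the BSP and bring up the APs.
 */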
static void coreboot_init_cpus(void *unused)
{
	if (CONFIG(USE_INTEL_FSP_MP_INIT))
		return;

	initialize_microcode();

	init_cpus();
}
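
/* If the boot device is memory mapped, set up caching over the BIOS region. */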
static void post_cpus_add_romcache(void)
{
	if (!CONFIG(BOOT_DEVICE_MEMORY_MAPPED))
		return;

	fast_spi_cache_bios_region();
}
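
/*
 * Thin wrappers matching the callback signature expected by
 * mp_run_on_all_cpus()/mp_run_on_all_cpus_synchronously().
 */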
static void wrapper_x86_setup_mtrrs(void *unused)
{
	x86_setup_mtrrs_with_detect();
}

static void wrapper_set_bios_done(void *unused)
{
	cpu_soc_bios_done();
}

static void wrapper_init_core_prmrr(void *unused)
{
	init_core_prmrr();
}
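
/*
 * Run per-core PRMRR setup and the SoC's BIOS Done hook on all CPUs, then
 * reload the microcode update. Only relevant when coreboot itself performed
 * MP init.
 */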
void before_post_cpus_init(void)
{
	/*
	 * Ensure all APs finish the task and continue if coreboot decides to
	 * perform multiprocessor initialization using native coreboot drivers
	 * instead of using the FSP MP PPI implementation.
	 *
	 * Ignore if USE_COREBOOT_MP_INIT is not enabled.
	 */
	if (!CONFIG(USE_COREBOOT_MP_INIT))
		return;

	if (mp_run_on_all_cpus(wrapper_init_core_prmrr, NULL) != CB_SUCCESS)
		printk(BIOS_ERR, "core PRMRR sync failure\n");

	if (mp_run_on_all_cpus(wrapper_set_bios_done, NULL) != CB_SUCCESS)
		printk(BIOS_ERR, "Set BIOS Done failure\n");

	intel_reload_microcode();
}

/* Ensure all MTRRs are re-programmed based on the DRAM resource settings. */
static void post_cpus_init(void *unused)
{
	/* Ensure all APs finish the task and continue */
	if (mp_run_on_all_cpus_synchronously(&wrapper_x86_setup_mtrrs, NULL) != CB_SUCCESS)
		printk(BIOS_ERR, "MTRR programming failure\n");

	post_cpus_add_romcache();
	x86_mtrr_check();
}

/* Do CPU MP init before FSP silicon init */
BOOT_STATE_INIT_ENTRY(BS_DEV_INIT_CHIPS, BS_ON_ENTRY, coreboot_init_cpus, NULL);
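/* Re-run MTRR programming once resources are final, and again on S3 resume */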
BOOT_STATE_INIT_ENTRY(BS_WRITE_TABLES, BS_ON_EXIT, post_cpus_init, NULL);
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, post_cpus_init, NULL);