soc/intel/alderlake/acpi.c: Don't look up coreboot CPU index
/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi.h>
#include <acpi/acpi_gnvs.h>
#include <acpi/acpigen.h>
#include <device/mmio.h>
#include <arch/smp/mpspec.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/pmclib.h>
#include <intelblocks/acpi.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/nvs.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <soc/soc_chip.h>
#include <soc/systemagent.h>
#include <cpu/cpu.h>
#include <types.h>
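
/* Constants used below when emitting the Low Power Idle device constraint package. */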
#define DEFAULT_CPU_D_STATE	D0
#define LPI_STATES_ALL		0xff
#define LPI_REVISION		0
#define LPI_ENABLED		1

/*
 * List of supported C-states in this processor.
 */
enum {
	C_STATE_C0,		/* 0 */
	C_STATE_C1,		/* 1 */
	C_STATE_C1E,		/* 2 */
	C_STATE_C6_SHORT_LAT,	/* 3 */
	C_STATE_C6_LONG_LAT,	/* 4 */
	C_STATE_C7_SHORT_LAT,	/* 5 */
	C_STATE_C7_LONG_LAT,	/* 6 */
	C_STATE_C7S_SHORT_LAT,	/* 7 */
	C_STATE_C7S_LONG_LAT,	/* 8 */
	C_STATE_C8,		/* 9 */
	C_STATE_C9,		/* 10 */
	C_STATE_C10,		/* 11 */
	NUM_C_STATES
};
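
/*
 * Exit latency, power and MWAIT resource hint used for each supported C-state
 * when the _CST entries are generated from the sets below.
 */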
static const acpi_cstate_t cstate_map[NUM_C_STATES] = {
	[C_STATE_C0] = {},
	[C_STATE_C1] = {
		.latency = C1_LATENCY,
		.power = C1_POWER,
		.resource = MWAIT_RES(0, 0),
	},
	[C_STATE_C1E] = {
		.latency = C1_LATENCY,
		.power = C1_POWER,
		.resource = MWAIT_RES(0, 1),
	},
	[C_STATE_C6_SHORT_LAT] = {
		.latency = C6_LATENCY,
		.power = C6_POWER,
		.resource = MWAIT_RES(2, 0),
	},
	[C_STATE_C6_LONG_LAT] = {
		.latency = C6_LATENCY,
		.power = C6_POWER,
		.resource = MWAIT_RES(2, 1),
	},
	[C_STATE_C7_SHORT_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 0),
	},
	[C_STATE_C7_LONG_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 1),
	},
	[C_STATE_C7S_SHORT_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 2),
	},
	[C_STATE_C7S_LONG_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 3),
	},
	[C_STATE_C8] = {
		.latency = C8_LATENCY,
		.power = C8_POWER,
		.resource = MWAIT_RES(4, 0),
	},
	[C_STATE_C9] = {
		.latency = C9_LATENCY,
		.power = C9_POWER,
		.resource = MWAIT_RES(5, 0),
	},
	[C_STATE_C10] = {
		.latency = C10_LATENCY,
		.power = C10_POWER,
		.resource = MWAIT_RES(6, 0),
	},
};
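
/* Subsets of cstate_map reported to the OS, depending on whether S0ix is enabled. */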
static int cstate_set_non_s0ix[] = {
	C_STATE_C1,
	C_STATE_C6_LONG_LAT,
	C_STATE_C7S_LONG_LAT
};

static int cstate_set_s0ix[] = {
	C_STATE_C1,
	C_STATE_C6_LONG_LAT,
	C_STATE_C10
};

enum dev_sleep_states {
	D0, /* 0 */
	D1, /* 1 */
	D2, /* 2 */
	D3, /* 3 */
	NONE
};
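
/*
 * Build the _CST C-state list from the s0ix or non-s0ix set above and
 * renumber the ACPI C-state types sequentially starting at 1.
 */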
const acpi_cstate_t *soc_get_cstate_map(size_t *entries)
{
	static acpi_cstate_t map[MAX(ARRAY_SIZE(cstate_set_s0ix),
				ARRAY_SIZE(cstate_set_non_s0ix))];
	int *set;
	int i;

	config_t *config = config_of_soc();

	int is_s0ix_enable = config->s0ix_enable;

	if (is_s0ix_enable) {
		*entries = ARRAY_SIZE(cstate_set_s0ix);
		set = cstate_set_s0ix;
	} else {
		*entries = ARRAY_SIZE(cstate_set_non_s0ix);
		set = cstate_set_non_s0ix;
	}

	for (i = 0; i < *entries; i++) {
		map[i] = cstate_map[set[i]];
		map[i].ctype = i + 1;
	}
	return map;
}
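
/* Generate per-core P-state tables when EIST (Enhanced Intel SpeedStep) is enabled. */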
void soc_power_states_generation(int core_id, int cores_per_package)
{
	config_t *config = config_of_soc();

	if (config->eist_enable)
		/* Generate P-state tables */
		generate_p_state_entries(core_id, cores_per_package);
}
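
/*
 * SoC-specific FADT fixups: describe the ACPI PM timer block and advertise
 * low-power S0 idle when S0ix is enabled.
 */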
void soc_fill_fadt(acpi_fadt_t *fadt)
{
	const uint16_t pmbase = ACPI_BASE_ADDRESS;

	config_t *config = config_of_soc();

	fadt->pm_tmr_blk = pmbase + PM1_TMR;
	fadt->pm_tmr_len = 4;
	fadt->x_pm_tmr_blk.space_id = ACPI_ADDRESS_SPACE_IO;
	fadt->x_pm_tmr_blk.bit_width = fadt->pm_tmr_len * 8;
	fadt->x_pm_tmr_blk.bit_offset = 0;
	fadt->x_pm_tmr_blk.access_size = ACPI_ACCESS_SIZE_DWORD_ACCESS;
	fadt->x_pm_tmr_blk.addrl = fadt->pm_tmr_blk;
	fadt->x_pm_tmr_blk.addrh = 0x0;

	if (config->s0ix_enable)
		fadt->flags |= ACPI_FADT_LOW_PWR_IDLE_S0;
}
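
/*
 * Minimum D-state each PCI function must reach before low power idle entry;
 * functions not listed here are excluded from the constraint list.
 */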
static const struct {
	uint8_t pci_dev;
	enum dev_sleep_states min_sleep_state;
} min_pci_sleep_states[] = {
	{ SA_DEVFN_ROOT, D3 },
	{ SA_DEVFN_CPU_PCIE1_0, D3 },
	{ SA_DEVFN_IGD, D3 },
	{ SA_DEVFN_DPTF, D3 },
	{ SA_DEVFN_IPU, D3 },
	{ SA_DEVFN_CPU_PCIE6_0, D3 },
	{ SA_DEVFN_CPU_PCIE6_2, D3 },
	{ SA_DEVFN_TBT0, D3 },
	{ SA_DEVFN_TBT1, D3 },
	{ SA_DEVFN_TBT2, D3 },
	{ SA_DEVFN_TBT3, D3 },
	{ SA_DEVFN_GNA, D3 },
	{ SA_DEVFN_TCSS_XHCI, D3 },
	{ SA_DEVFN_TCSS_XDCI, D3 },
	{ SA_DEVFN_TCSS_DMA0, D3 },
	{ SA_DEVFN_TCSS_DMA1, D3 },
	{ SA_DEVFN_VMD, D3 },
	{ PCH_DEVFN_I2C6, D3 },
	{ PCH_DEVFN_I2C7, D3 },
	{ PCH_DEVFN_THC0, D3 },
	{ PCH_DEVFN_THC1, D3 },
	{ PCH_DEVFN_XHCI, D3 },
	{ PCH_DEVFN_USBOTG, D3 },
	{ PCH_DEVFN_SRAM, D3 },
	{ PCH_DEVFN_CNVI_WIFI, D3 },
	{ PCH_DEVFN_I2C0, D3 },
	{ PCH_DEVFN_I2C1, D3 },
	{ PCH_DEVFN_I2C2, D3 },
	{ PCH_DEVFN_I2C3, D3 },
	{ PCH_DEVFN_CSE, D0 },
	{ PCH_DEVFN_SATA, D3 },
	{ PCH_DEVFN_I2C4, D3 },
	{ PCH_DEVFN_I2C5, D3 },
	{ PCH_DEVFN_UART2, D3 },
	{ PCH_DEVFN_PCIE1, D0 },
	{ PCH_DEVFN_PCIE2, D0 },
	{ PCH_DEVFN_PCIE3, D0 },
	{ PCH_DEVFN_PCIE4, D0 },
	{ PCH_DEVFN_PCIE5, D0 },
	{ PCH_DEVFN_PCIE6, D0 },
	{ PCH_DEVFN_PCIE7, D0 },
	{ PCH_DEVFN_PCIE8, D0 },
	{ PCH_DEVFN_PCIE9, D0 },
	{ PCH_DEVFN_PCIE10, D0 },
	{ PCH_DEVFN_PCIE11, D0 },
	{ PCH_DEVFN_PCIE12, D0 },
	{ PCH_DEVFN_UART0, D3 },
	{ PCH_DEVFN_UART1, D3 },
	{ PCH_DEVFN_GSPI0, D3 },
	{ PCH_DEVFN_GSPI1, D3 },
	{ PCH_DEVFN_ESPI, D0 },
	{ PCH_DEVFN_PMC, D0 },
	{ PCH_DEVFN_HDA, D0 },
	{ PCH_DEVFN_SPI, D3 },
	{ PCH_DEVFN_GBE, D3 },
};
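
/*
 * Map a device to its minimum sleep state: CPU threads use
 * DEFAULT_CPU_D_STATE, PCI functions use the table above, and anything else
 * (or any disabled device) is excluded from the constraint list.
 */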
static enum dev_sleep_states get_min_sleep_state(const struct device *dev)
{
	if (!is_dev_enabled(dev))
		return NONE;

	switch (dev->path.type) {
	case DEVICE_PATH_APIC:
		return DEFAULT_CPU_D_STATE;

	case DEVICE_PATH_PCI:
		for (size_t i = 0; i < ARRAY_SIZE(min_pci_sleep_states); i++)
			if (min_pci_sleep_states[i].pci_dev == dev->path.pci.devfn)
				return min_pci_sleep_states[i].min_sleep_state;
		printk(BIOS_WARNING, "Unknown min d_state for %x\n", dev->path.pci.devfn);
		return NONE;

	default:
		return NONE;
	}
}

/* Generate the LPI constraint table and log the number of devices included */
void soc_lpi_get_constraints(void *unused)
{
	unsigned int num_entries;
	const struct device *dev;
	enum dev_sleep_states min_sleep_state;

	num_entries = 0;

	for (dev = all_devices; dev; dev = dev->next) {
		if (get_min_sleep_state(dev) != NONE)
			num_entries++;
	}

	acpigen_emit_byte(RETURN_OP);
	acpigen_write_package(num_entries);

	size_t cpu_index = 0;
	for (dev = all_devices; dev; dev = dev->next) {
		min_sleep_state = get_min_sleep_state(dev);
		if (min_sleep_state == NONE)
			continue;

		acpigen_write_package(3);

		char path[32] = { 0 };
		/* Emit the device path */
		switch (dev->path.type) {
		case DEVICE_PATH_PCI:
			acpigen_emit_namestring(acpi_device_path(dev));
			break;

		case DEVICE_PATH_APIC:
			snprintf(path, sizeof(path), CONFIG_ACPI_CPU_STRING,
				 cpu_index++);
			acpigen_emit_namestring(path);
			break;

		default:
			/* Unhandled */
			printk(BIOS_WARNING,
			       "Unhandled device path type %d\n", dev->path.type);
			acpigen_emit_namestring(NULL);
			break;
		}

		acpigen_write_integer(LPI_ENABLED);
		acpigen_write_package(2);

		acpigen_write_integer(LPI_REVISION);
		acpigen_write_package(2); /* no optional device info */

		/* Assume constraints apply to all entries */
		acpigen_write_integer(LPI_STATES_ALL);
		acpigen_write_integer(min_sleep_state); /* min D-state */

		acpigen_write_package_end();
		acpigen_write_package_end();
		acpigen_write_package_end();
	}

	acpigen_write_package_end();

	printk(BIOS_INFO, "Returning SoC specific constraint package for %d devices\n", num_entries);
}
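
/* Read the PMC IRQ routing register that holds the SCI IRQ selection. */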
uint32_t soc_read_sci_irq_select(void)
{
	return read32p(soc_read_pmc_base() + IRQ_REG);
}
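
/*
 * Fill DMAR remapping structures: a DRHD for each enabled VT-d engine (IGD,
 * IPU, TCSS Thunderbolt root ports and the global VT-d unit with its IOAPIC
 * and HPET device scopes) plus an RMRR covering graphics stolen memory.
 */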
static unsigned long soc_fill_dmar(unsigned long current)
{
	const uint64_t gfxvtbar = MCHBAR64(GFXVTBAR) & VTBAR_MASK;
	const bool gfxvten = MCHBAR32(GFXVTBAR) & VTBAR_ENABLED;

	if (is_devfn_enabled(SA_DEVFN_IGD) && gfxvtbar && gfxvten) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current, 0, 0, gfxvtbar);
		current += acpi_create_dmar_ds_pci(current, 0, SA_DEV_SLOT_IGD, 0);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	const uint64_t ipuvtbar = MCHBAR64(IPUVTBAR) & VTBAR_MASK;
	const bool ipuvten = MCHBAR32(IPUVTBAR) & VTBAR_ENABLED;

	if (is_devfn_enabled(SA_DEVFN_IPU) && ipuvtbar && ipuvten) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current, 0, 0, ipuvtbar);
		current += acpi_create_dmar_ds_pci(current, 0, SA_DEV_SLOT_IPU, 0);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	/* TCSS Thunderbolt root ports */
	for (unsigned int i = 0; i < MAX_TBT_PCIE_PORT; i++) {
		if (is_devfn_enabled(SA_DEVFN_TBT(i))) {
			const uint64_t tbtbar = MCHBAR64(TBTxBAR(i)) & VTBAR_MASK;
			const bool tbten = MCHBAR32(TBTxBAR(i)) & VTBAR_ENABLED;
			if (tbtbar && tbten) {
				const unsigned long tmp = current;

				current += acpi_create_dmar_drhd(current, 0, 0, tbtbar);
				current += acpi_create_dmar_ds_pci_br(current, 0,
								      SA_DEV_SLOT_TBT, i);

				acpi_dmar_drhd_fixup(tmp, current);
			}
		}
	}

	const uint64_t vtvc0bar = MCHBAR64(VTVC0BAR) & VTBAR_MASK;
	const bool vtvc0en = MCHBAR32(VTVC0BAR) & VTBAR_ENABLED;

	if (vtvc0bar && vtvc0en) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current,
				DRHD_INCLUDE_PCI_ALL, 0, vtvc0bar);
		current += acpi_create_dmar_ds_ioapic(current,
				2, V_P2SB_CFG_IBDF_BUS, V_P2SB_CFG_IBDF_DEV,
				V_P2SB_CFG_IBDF_FUNC);
		current += acpi_create_dmar_ds_msi_hpet(current,
				0, V_P2SB_CFG_HBDF_BUS, V_P2SB_CFG_HBDF_DEV,
				V_P2SB_CFG_HBDF_FUNC);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	/* Add RMRR entry */
	if (is_devfn_enabled(SA_DEVFN_IGD)) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_rmrr(current, 0,
				sa_get_gsm_base(), sa_get_tolud_base() - 1);
		current += acpi_create_dmar_ds_pci(current, 0, SA_DEV_SLOT_IGD, 0);
		acpi_dmar_rmrr_fixup(tmp, current);
	}

	return current;
}
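
/*
 * Emit the DMAR ACPI table only when the SoC reports VT-d capability and the
 * VT-d engine was left enabled.
 */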
unsigned long sa_write_acpi_tables(const struct device *dev, unsigned long current,
				   struct acpi_rsdp *rsdp)
{
	acpi_dmar_t *const dmar = (acpi_dmar_t *)current;

	/*
	 * Create DMAR table only if we have VT-d capability and FSP does not override its
	 * feature.
	 */
	if ((pci_read_config32(dev, CAPID0_A) & VTD_DISABLE) ||
	    !(MCHBAR32(VTVC0BAR) & VTBAR_ENABLED))
		return current;

	printk(BIOS_DEBUG, "ACPI: * DMAR\n");
	acpi_create_dmar(dmar, DMAR_INTR_REMAP | DMA_CTRL_PLATFORM_OPT_IN_FLAG, soc_fill_dmar);
	current += dmar->header.length;
	current = acpi_align_current(current);
	acpi_add_table(rsdp, dmar);

	return current;
}
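
/* Copy mainboard policy (DPTF enable, USB wake bitmaps) into GNVS for ASL consumption. */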
void soc_fill_gnvs(struct global_nvs *gnvs)
{
	config_t *config = config_of_soc();

	/* Enable DPTF based on mainboard configuration */
	gnvs->dpte = config->dptf_enable;

	/* Set USB2/USB3 wake enable bitmaps. */
	gnvs->u2we = config->usb2_wake_enable_bitmap;
	gnvs->u3we = config->usb3_wake_enable_bitmap;
}
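
/* SCI interrupt polarity reported in the MADT: always active high on this SoC. */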
int soc_madt_sci_irq_polarity(int sci)
{
	return MP_IRQ_POLARITY_HIGH;
}