/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <acpi/acpi.h>
#include <acpi/acpigen.h>
#include <arch/ioapic.h>
#include <assert.h>
#include <console/console.h>
#include <cpu/x86/lapic.h>
#include <commonlib/sort.h>
#include <device/mmio.h>
#include <device/pci.h>
#include <device/pciexp.h>
#include <device/pci_ids.h>
#include <soc/chip_common.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/soc_util.h>
#include <string.h>
#include <types.h>

/* NUMA related ACPI table generation. SRAT, SLIT, etc */

/* Increase if necessary. Currently all x86 CPUs only have 2 SMP threads */
#define MAX_THREAD 2
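
/*
 * Emit one SRAT Processor Local APIC/x2APIC Affinity entry per enabled CPU,
 * mapping its APIC ID to the proximity domain returned by device_to_pd().
 * Entries are grouped by thread_id and sorted by APIC ID within each group,
 * so all primary threads are listed before their hyper-threading siblings.
 */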
unsigned long acpi_create_srat_lapics(unsigned long current)
{
	struct device *cpu;
	unsigned int num_cpus = 0;
	int apic_ids[CONFIG_MAX_CPUS] = {};

	unsigned int sort_start = 0;
	for (unsigned int thread_id = 0; thread_id < MAX_THREAD; thread_id++) {
		for (cpu = all_devices; cpu; cpu = cpu->next) {
			if (!is_enabled_cpu(cpu))
				continue;
			if (num_cpus >= ARRAY_SIZE(apic_ids))
				break;
			if (cpu->path.apic.thread_id != thread_id)
				continue;
			apic_ids[num_cpus++] = cpu->path.apic.apic_id;
		}
		bubblesort(&apic_ids[sort_start], num_cpus - sort_start, NUM_ASCENDING);
		sort_start = num_cpus;
	}

	for (unsigned int i = 0; i < num_cpus; i++) {
		/* Match the sorted apic_ids to a struct device */
		for (cpu = all_devices; cpu; cpu = cpu->next) {
			if (!is_enabled_cpu(cpu))
				continue;
			if (cpu->path.apic.apic_id == apic_ids[i])
				break;
		}
		if (!cpu)
			continue;

		if (is_x2apic_mode()) {
			printk(BIOS_DEBUG, "SRAT: x2apic cpu_index=%04x, node_id=%02x, apic_id=%08x\n",
			       i, device_to_pd(cpu), cpu->path.apic.apic_id);

			current += acpi_create_srat_x2apic((acpi_srat_x2apic_t *)current,
							   device_to_pd(cpu), cpu->path.apic.apic_id);
		} else {
			printk(BIOS_DEBUG, "SRAT: lapic cpu_index=%02x, node_id=%02x, apic_id=%02x\n",
			       i, device_to_pd(cpu), cpu->path.apic.apic_id);

			current += acpi_create_srat_lapic((acpi_srat_lapic_t *)current,
							  device_to_pd(cpu), cpu->path.apic.apic_id);
		}
	}

	return current;
}
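
/*
 * Build one SRAT Memory Affinity entry (type 1, 40 bytes) for a memory map
 * element. Base address and length are encoded as low/high 32-bit halves,
 * the proximity domain ties the range to a NUMA node, and the flags mark
 * the range as enabled and, if applicable, non-volatile.
 */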
static void acpi_fill_srat_memory(int *cnt, acpi_srat_mem_t *current,
				  const struct SystemMemoryMapElement *e)
{
	acpi_srat_mem_t srat;
	uint64_t addr, size;

	addr = ((uint64_t)e->BaseAddress << MEM_ADDR_64MB_SHIFT_BITS);
	size = ((uint64_t)e->ElementSize << MEM_ADDR_64MB_SHIFT_BITS);

	printk(BIOS_DEBUG, "SRAT: sysmemmap addr: 0x%llx, BaseAddress: 0x%x, size: 0x%llx, "
	       "ElementSize: 0x%x, type: %d, reserved: %d\n", addr, e->BaseAddress,
	       size, e->ElementSize, e->Type, is_memtype_reserved(e->Type));

	/* skip reserved memory region */
	if (is_memtype_reserved(e->Type))
		return;
	/* skip all non processor attached memory regions */
	if (CONFIG(SOC_INTEL_HAS_CXL) &&
	    (!is_memtype_processor_attached(e->Type)))
		return;

	/* Prepare ACPI table entry */
	memset(&srat, 0, sizeof(acpi_srat_mem_t));

	srat.type = 1; /* Memory affinity structure */
	srat.length = sizeof(acpi_srat_mem_t);
	srat.base_address_low = (uint32_t)(addr & 0xffffffff);
	srat.base_address_high = (uint32_t)(addr >> 32);
	srat.length_low = (uint32_t)(size & 0xffffffff);
	srat.length_high = (uint32_t)(size >> 32);
	srat.proximity_domain = memory_to_pd(e);
	srat.flags = ACPI_SRAT_MEMORY_ENABLED;
	if (is_memtype_non_volatile(e->Type))
		srat.flags |= ACPI_SRAT_MEMORY_NONVOLATILE;

	/* skip if this address is already added */
	for (int i = 0; i < *cnt; i++) {
		if ((srat.base_address_high == current[-i].base_address_high) &&
		    (srat.base_address_low == current[-i].base_address_low)) {
			return;
		}
	}

	printk(BIOS_DEBUG, "SRAT: adding memory %d entry length: %d, addr: 0x%x%x, "
	       "length: 0x%x%x, proximity_domain: %d, flags: %x\n",
	       *cnt, srat.length, srat.base_address_high, srat.base_address_low,
	       srat.length_high, srat.length_low, srat.proximity_domain, srat.flags);

	memcpy(current, &srat, sizeof(acpi_srat_mem_t));
	(*cnt)++;
}

static unsigned long acpi_fill_srat(unsigned long current)
{
	const struct SystemMemoryMapHob *memory_map;
	acpi_srat_mem_t *acpi_srat;
	int cnt = 0;

	memory_map = get_system_memory_map();
	assert(memory_map);
	printk(BIOS_DEBUG, "SRAT: memory_map: %p\n", memory_map);

	/* create all subtables for processors */
	current = acpi_create_srat_lapics(current);

	acpi_srat = (acpi_srat_mem_t *)current;

	for (int i = 0; i < memory_map->numberEntries; ++i) {
		const struct SystemMemoryMapElement *e = &memory_map->Element[i];
		acpi_fill_srat_memory(&cnt, &acpi_srat[cnt], e);
	}
	printk(BIOS_DEBUG, "SRAT: Added %d memory entries\n", cnt);

	current = (unsigned long)&acpi_srat[cnt];

	if (CONFIG(SOC_INTEL_HAS_CXL))
		current = cxl_fill_srat(current);

	return current;
}
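
/*
 * The SLIT encodes relative distances between the proximity domains that the
 * SRAT defined above: entry [i][j] is the cost of accessing memory in domain
 * j from domain i, normalized so that the local distance is 10.
 */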
#if CONFIG(SOC_INTEL_SAPPHIRERAPIDS_SP)
/*
 * pds.num_pds comes from the spr/numa.c function fill_pds():
 * pds.num_pds = soc_get_num_cpus() + get_cxl_node_count().
 */
/* SPR-SP platform has Generic Initiator domain in addition to processor domain */
static unsigned long acpi_fill_slit(unsigned long current)
{
	uint8_t *p = (uint8_t *)current;
	/* According to table 5.60 of ACPI 6.4 spec, "Number of System Localities" field takes
	   up 8 bytes. Following that, each matrix entry takes up 1 byte. */
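	/*
	 * Example layout for num_pds == 2:
	 *   bytes 0..7 : number of localities (2)
	 *   byte  8    : distance from domain 0 to domain 0 (10 = local)
	 *   byte  9    : distance from domain 0 to domain 1
	 *   byte 10    : distance from domain 1 to domain 0
	 *   byte 11    : distance from domain 1 to domain 1 (10 = local)
	 */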
	memset(p, 0, 8 + pds.num_pds * pds.num_pds);
	*p = (uint8_t)pds.num_pds;
	p += 8;

	for (int i = 0; i < pds.num_pds; i++) {
		for (int j = 0; j < pds.num_pds; j++)
			p[i * pds.num_pds + j] = pds.pds[i].distances[j];
	}

	current += 8 + pds.num_pds * pds.num_pds;
	return current;
}
#else
static unsigned long acpi_fill_slit(unsigned long current)
{
	unsigned int nodes = soc_get_num_cpus();

	uint8_t *p = (uint8_t *)current;
	memset(p, 0, 8 + nodes * nodes);
	*p = (uint8_t)nodes;
	p += 8;

	/* this assumes fully connected socket topology */
	for (int i = 0; i < nodes; i++) {
		for (int j = 0; j < nodes; j++) {
			/* 10 is the architecturally defined local distance */
			if (i == j)
				p[i * nodes + j] = 10;
			else
				p[i * nodes + j] = 16;
		}
	}

	current += 8 + nodes * nodes;
	return current;
}
#endif

/*
 * This function adds PCIe bridge device entry in DMAR table. If it is called
 * in the context of ATSR subtable, it adds ATSR subtable when it is first called.
 */
static unsigned long acpi_create_dmar_ds_pci_br_for_port(unsigned long current,
							  const struct device *bridge_dev,
							  const uint32_t pcie_seg,
							  bool is_atsr, bool *first)
{
	const uint32_t bus = bridge_dev->upstream->secondary;
	const uint32_t dev = PCI_SLOT(bridge_dev->path.pci.devfn);
	const uint32_t func = PCI_FUNC(bridge_dev->path.pci.devfn);

	unsigned long atsr_size = 0;
	unsigned long pci_br_size = 0;
	if (is_atsr == true && first && *first == true) {
		printk(BIOS_DEBUG, "[Root Port ATS Capability] Flags: 0x%x, "
		       "PCI Segment Number: 0x%x\n", 0, pcie_seg);
		atsr_size = acpi_create_dmar_atsr(current, 0, pcie_seg);
		*first = false;
	}

	printk(BIOS_DEBUG, " [PCI Bridge Device] %s\n", dev_path(bridge_dev));
	pci_br_size = acpi_create_dmar_ds_pci_br(current + atsr_size, bus, dev, func);

	return (atsr_size + pci_br_size);
}
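
/*
 * Emit one DRHD (DMA Remapping Hardware Unit Definition) per VT-d engine,
 * using the VTD_BAR_CSR resource as the register base address. The device
 * scope lists the IOAPIC, root ports and endpoints owned by this unit; a
 * unit flagged DRHD_INCLUDE_PCI_ALL instead claims every PCI device of the
 * segment that is not covered by another unit and therefore needs no
 * explicit root port or endpoint scope entries.
 */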
static unsigned long acpi_create_drhd(unsigned long current, struct device *iommu,
				      const IIO_UDS *hob)
{
	unsigned long tmp = current;

	struct resource *resource;
	resource = probe_resource(iommu, VTD_BAR_CSR);
	if (!resource || !resource->base || !resource->size)
		return current;

	const uint32_t bus = iommu->upstream->secondary;
	uint32_t pcie_seg = iommu->upstream->segment_group;
	int socket = iio_pci_domain_socket_from_dev(iommu);
	int stack = iio_pci_domain_stack_from_dev(iommu);

	printk(BIOS_SPEW, "%s socket: %d, stack: %d, bus: 0x%x, pcie_seg: 0x%x, reg_base: 0x%llx\n",
	       __func__, socket, stack, bus, pcie_seg, resource->base);

	/*
	 * Add DRHD Hardware Unit
	 * For IBL platforms, domain0 is not PCH stack and not recommended to set
	 * DRHD_INCLUDE_PCI_ALL
	 */
	uint8_t flags = ((!CONFIG(SOC_INTEL_COMMON_IBL_BASE)) && is_dev_on_domain0(iommu)) ?
			DRHD_INCLUDE_PCI_ALL : 0;

	printk(BIOS_DEBUG, "[Hardware Unit Definition] Flags: 0x%x, PCI Segment Number: 0x%x, "
	       "Register Base Address: 0x%llx\n",
	       flags, pcie_seg, resource->base);
	current += acpi_create_dmar_drhd(current, flags, pcie_seg, resource->base,
					 resource->size);

	// Add IOAPIC
	if (is_dev_on_domain0(iommu)) {
		union p2sb_bdf ioapic_bdf = soc_get_ioapic_bdf();
		printk(BIOS_DEBUG, " [IOAPIC Device] Enumeration ID: 0x%x, PCI Bus Number: 0x%x, "
		       "PCI Path: 0x%x, 0x%x\n", get_ioapic_id(IO_APIC_ADDR), ioapic_bdf.bus,
		       ioapic_bdf.dev, ioapic_bdf.fn);
		current += acpi_create_dmar_ds_ioapic_from_hw(current,
				IO_APIC_ADDR, ioapic_bdf.bus, ioapic_bdf.dev, ioapic_bdf.fn);
	}

	/* SPR and later SoCs have no per stack IOAPIC */
#if CONFIG(SOC_INTEL_SKYLAKE_SP) || CONFIG(SOC_INTEL_COOPERLAKE_SP)
	uint32_t enum_id;
	enum_id = soc_get_iio_ioapicid(socket, stack);
	printk(BIOS_DEBUG, " [IOAPIC Device] Enumeration ID: 0x%x, PCI Bus Number: 0x%x, "
	       "PCI Path: 0x%x, 0x%x\n", enum_id, bus, APIC_DEV_NUM, APIC_FUNC_NUM);
	current += acpi_create_dmar_ds_ioapic(current, enum_id, bus,
					      APIC_DEV_NUM, APIC_FUNC_NUM);
#endif

	if (flags != DRHD_INCLUDE_PCI_ALL) {
		// Add PCIe root ports
		const struct device *domain = dev_get_domain(iommu);
		struct device *dev = NULL;
		while ((dev = dev_bus_each_child(domain->downstream, dev)))
			if (is_pci_bridge(dev))
				current += acpi_create_dmar_ds_pci_br_for_port(
					current, dev, pcie_seg, false, NULL);

		// Add PCIe end points
		dev = NULL;
		while ((dev = dev_find_all_devices_on_stack(socket, stack,
			XEONSP_VENDOR_MAX, XEONSP_DEVICE_MAX, dev))) {
			const uint32_t b = dev->upstream->secondary;
			const uint32_t d = PCI_SLOT(dev->path.pci.devfn);
			const uint32_t f = PCI_FUNC(dev->path.pci.devfn);
			struct device *upstream_dev = dev->upstream->dev;

			if (is_pci_bridge(dev))
				continue;

			/* Only devices sitting directly on the domain's root bus */
			if (upstream_dev->path.type != DEVICE_PATH_DOMAIN)
				continue;

			printk(BIOS_DEBUG, " [PCIE Endpoint Device] %s\n", dev_path(dev));
			current += acpi_create_dmar_ds_pci(current, b, d, f);
		}
	}

	// Add HPET
	if (is_dev_on_domain0(iommu)) {
		uint16_t hpet_capid = read16p(HPET_BASE_ADDRESS + HPET_GEN_CAP_ID);
		// Bits [8:12] has hpet count
		uint16_t num_hpets = (hpet_capid >> HPET_NUM_TIM_CAP_SHIFT) & HPET_NUM_TIM_CAP_MASK;
		printk(BIOS_SPEW, "%s hpet_capid: 0x%x, num_hpets: 0x%x\n",
		       __func__, hpet_capid, num_hpets);
		/* Only report the HPET if timer 0 supports FSB (MSI-style) interrupt
		   delivery (timer configuration register bit 15) */
		if (num_hpets && (num_hpets != 0x1f) &&
		    (read32p(HPET_BASE_ADDRESS + HPET_TMR0_CNF_CAP) & (HPET_TIMER_FSB_EN_CNF_MASK))) {
			union p2sb_bdf hpet_bdf = soc_get_hpet_bdf();
			printk(BIOS_DEBUG, " [Message-capable HPET Device] Enumeration ID: 0x%x, "
			       "PCI Bus Number: 0x%x, PCI Path: 0x%x, 0x%x\n",
			       0, hpet_bdf.bus, hpet_bdf.dev, hpet_bdf.fn);
			current += acpi_create_dmar_ds_msi_hpet(current, 0, hpet_bdf.bus,
								hpet_bdf.dev, hpet_bdf.fn);
		}
	}

	acpi_dmar_drhd_fixup(tmp, current);

	return current;
}
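
/*
 * ATSR (Address Translation Services Reporting) structures tell the OS which
 * PCIe root ports can handle ATS translation requests. One ATSR header is
 * emitted per enabled socket (the host bridges of a socket are assumed to
 * share a PCI segment group), containing a PCI bridge scope entry for each
 * root port behind a VT-d engine that reports device-IOTLB support.
 */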
static unsigned long acpi_create_atsr(unsigned long current)
{
	struct device *child, *dev;
	struct resource *resource;

	/*
	 * The assumption made here is that the host bridges on a socket share the
	 * PCI segment group and thus only one ATSR header needs to be emitted for
	 * a single socket.
	 * This is easier than to sort the host bridges by PCI segment group first
	 * and then generate one ATSR header for every new segment.
	 */
	for (int socket = 0; socket < CONFIG_MAX_SOCKET; ++socket) {
		if (!soc_cpu_is_enabled(socket))
			continue;

		unsigned long tmp = current;
		bool first = true;
		dev = NULL;

		while ((dev = dev_find_device(PCI_VID_INTEL, MMAP_VTD_CFG_REG_DEVID, dev))) {
			/* Only add devices for the current socket */
			if (iio_pci_domain_socket_from_dev(dev) != socket)
				continue;

			/* See if there is a resource with the appropriate index. */
			resource = probe_resource(dev, VTD_BAR_CSR);
			if (!resource || !resource->base || !resource->size)
				continue;

			int stack = iio_pci_domain_stack_from_dev(dev);

			uint64_t vtd_mmio_cap = read64(res2mmio(resource, VTD_EXT_CAP_LOW, 0));
			printk(BIOS_SPEW, "%s socket: %d, stack: %d, bus: 0x%x, vtd_base: %p, "
			       "vtd_mmio_cap: 0x%llx\n",
			       __func__, socket, stack, dev->upstream->secondary,
			       res2mmio(resource, 0, 0), vtd_mmio_cap);

			// ATSR is applicable only for platform supporting device IOTLBs
			// through the VT-d extended capability register
			assert(vtd_mmio_cap != 0xffffffffffffffff);
			if ((vtd_mmio_cap & 0x4) == 0) // BIT 2
				continue;

			/* Skip the VT-d engine on PCI bus 0, segment group 0 */
			if (dev->upstream->secondary == 0 && dev->upstream->segment_group == 0)
				continue;

			for (child = dev->upstream->children; child; child = child->sibling) {
				if (!is_pci_bridge(child))
					continue;
				current += acpi_create_dmar_ds_pci_br_for_port(
					current, child, child->upstream->segment_group, true, &first);
			}
		}
		if (tmp != current)
			acpi_dmar_atsr_fixup(tmp, current);
	}

	return current;
}

static unsigned long acpi_create_rmrr(unsigned long current)
{
	/* No RMRR (Reserved Memory Region Reporting) entries are added */
	return current;
}
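
/*
 * RHSA (Remapping Hardware Static Affinity) entries associate each VT-d
 * register base with the proximity domain of the socket it lives on, so
 * the OS can allocate its remapping structures from local memory.
 */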
static unsigned long acpi_create_rhsa(unsigned long current)
{
	struct device *dev = NULL;
	struct resource *resource;

	while ((dev = dev_find_device(PCI_VID_INTEL, MMAP_VTD_CFG_REG_DEVID, dev))) {
		/* See if there is a resource with the appropriate index. */
		resource = probe_resource(dev, VTD_BAR_CSR);
		if (!resource || !resource->base || !resource->size)
			continue;

		printk(BIOS_DEBUG, "[Remapping Hardware Static Affinity] Base Address: %p, "
		       "Proximity Domain: 0x%x\n", res2mmio(resource, 0, 0), device_to_pd(dev));
		current += acpi_create_dmar_rhsa(current, (uintptr_t)res2mmio(resource, 0, 0), device_to_pd(dev));
	}

	return current;
}
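
/*
 * Walk all children of an IIO domain and emit a PCI endpoint device scope
 * entry for every device that exposes the PCIe ATS extended capability,
 * i.e. every device that may cache translations in a device-side ATC.
 */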
static unsigned long xeonsp_create_satc(unsigned long current, struct device *domain)
{
	struct device *dev = NULL;
	while ((dev = dev_bus_each_child(domain->downstream, dev))) {
		if (pciexp_find_extended_cap(dev, PCIE_EXT_CAP_ID_ATS, 0)) {
			const uint32_t b = domain->downstream->secondary;
			const uint32_t d = PCI_SLOT(dev->path.pci.devfn);
			const uint32_t f = PCI_FUNC(dev->path.pci.devfn);
			printk(BIOS_DEBUG, " [SATC Endpoint Device] %s\n", dev_path(dev));
			current += acpi_create_dmar_ds_pci(current, b, d, f);
		}
	}

	return current;
}
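
/*
 * The SATC structure lists SoC-integrated devices that implement an address
 * translation cache (ATC). One header is emitted per PCI segment group,
 * followed by the endpoint scope entries collected by xeonsp_create_satc().
 */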
/* SoC Integrated Address Translation Cache */
static unsigned long acpi_create_satc(unsigned long current)
{
	unsigned long tmp = current, seg = ~0;
	struct device *dev = NULL;

	/*
	 * Best case only PCI segment group count SATC headers are emitted, worst
	 * case for every SATC entry a new SATC header is being generated.
	 *
	 * The assumption made here is that the host bridges on a socket share the
	 * PCI segment group and thus only one SATC header needs to be emitted for
	 * a single socket.
	 * This is easier than to sort the host bridges by PCI segment group first
	 * and then generate one SATC header for every new segment.
	 *
	 * With this assumption the best case scenario should always be used.
	 */
	for (int socket = 0; socket < CONFIG_MAX_SOCKET; ++socket) {
		if (!soc_cpu_is_enabled(socket))
			continue;

		while ((dev = dev_find_path(dev, DEVICE_PATH_DOMAIN))) {
			/* Only add devices for the current socket */
			if (iio_pci_domain_socket_from_dev(dev) != socket)
				continue;

			if (seg != dev->downstream->segment_group) {
				// Close previous header
				if (tmp != current)
					acpi_dmar_satc_fixup(tmp, current);
				tmp = current;

				seg = dev->downstream->segment_group;
				printk(BIOS_DEBUG, "[SATC Segment Header] "
				       "Flags: 0x%x, PCI segment group: %lx\n", 0, seg);
				// Add the SATC header
				current += acpi_create_dmar_satc(current, 0, seg);
			}
			current = xeonsp_create_satc(current, dev);
		}
	}
	if (tmp != current)
		acpi_dmar_satc_fixup(tmp, current);

	return current;
}
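
/*
 * Assemble the DMAR table body: all DRHD units first (the domain0 unit with
 * INCLUDE_PCI_ALL is emitted last, as the VT-d spec requires), followed by
 * the RMRR, ATSR, RHSA and SATC structures.
 */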
static unsigned long acpi_fill_dmar(unsigned long current)
{
	const IIO_UDS *hob = get_iio_uds();

	// DRHD - iommu0 must be the last DRHD entry.
	struct device *dev = NULL;
	struct device *iommu0 = NULL;
	while ((dev = dev_find_device(PCI_VID_INTEL, MMAP_VTD_CFG_REG_DEVID, dev))) {
		if (is_dev_on_domain0(dev)) {
			iommu0 = dev;
			continue;
		}
		current = acpi_create_drhd(current, dev, hob);
	}
	assert(iommu0);
	current = acpi_create_drhd(current, iommu0, hob);

	// RMRR
	current = acpi_create_rmrr(current);

	// Root Port ATS Capability
	current = acpi_create_atsr(current);

	// RHSA
	current = acpi_create_rhsa(current);

	// SATC
	current = acpi_create_satc(current);

	return current;
}
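
/*
 * Entry point for the uncore ACPI tables: emits SRAT and SLIT, HMAT and CEDT
 * on CXL platforms, DMAR when VT-d is enabled in the devicetree, and HEST
 * when selected, each aligned and registered with the RSDP.
 */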
unsigned long northbridge_write_acpi_tables(const struct device *device, unsigned long current,
					    struct acpi_rsdp *rsdp)
{
	/* Only write uncore ACPI tables for domain0 */
	if (!is_domain0(device))
		return current;

	acpi_srat_t *srat;
	acpi_slit_t *slit;
	acpi_hmat_t *hmat;
	acpi_cedt_t *cedt;
	acpi_dmar_t *dmar;

	const config_t *const config = config_of(device);

	/* SRAT */
	current = ALIGN_UP(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SRAT at %lx\n", current);
	srat = (acpi_srat_t *)current;
	acpi_create_srat(srat, acpi_fill_srat);
	current += srat->header.length;
	acpi_add_table(rsdp, srat);

	/* SLIT */
	current = ALIGN_UP(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SLIT at %lx\n", current);
	slit = (acpi_slit_t *)current;
	acpi_create_slit(slit, acpi_fill_slit);
	current += slit->header.length;
	acpi_add_table(rsdp, slit);

	if (CONFIG(SOC_INTEL_HAS_CXL)) {
		/* HMAT */
		current = ALIGN_UP(current, 8);
		printk(BIOS_DEBUG, "ACPI: * HMAT at %lx\n", current);
		hmat = (acpi_hmat_t *)current;
		acpi_create_hmat(hmat, acpi_fill_hmat);
		current += hmat->header.length;
		acpi_add_table(rsdp, hmat);
	}

	/* DMAR */
	if (config->vtd_support) {
		current = ALIGN_UP(current, 8);
		dmar = (acpi_dmar_t *)current;
		enum dmar_flags flags = DMAR_INTR_REMAP;

		/* SKX FSP doesn't support X2APIC, but CPX FSP does */
		if (CONFIG(SOC_INTEL_SKYLAKE_SP))
			flags |= DMAR_X2APIC_OPT_OUT;

		printk(BIOS_DEBUG, "ACPI: * DMAR at %lx\n", current);
		printk(BIOS_DEBUG, "[DMA Remapping table] Flags: 0x%x\n", flags);
		acpi_create_dmar(dmar, flags, acpi_fill_dmar);
		current += dmar->header.length;
		current = acpi_align_current(current);
		acpi_add_table(rsdp, dmar);
	}

	if (CONFIG(SOC_INTEL_HAS_CXL)) {
		/* CEDT: CXL Early Discovery Table */
		if (get_cxl_node_count() > 0) {
			current = ALIGN_UP(current, 8);
			printk(BIOS_DEBUG, "ACPI: * CEDT at %lx\n", current);
			cedt = (acpi_cedt_t *)current;
			acpi_create_cedt(cedt, acpi_fill_cedt);
			current += cedt->header.length;
			acpi_add_table(rsdp, cedt);
		}
	}

	if (CONFIG(SOC_ACPI_HEST)) {
		printk(BIOS_DEBUG, "ACPI: * HEST at %lx\n", current);
		current = hest_create(current, rsdp);
	}

	return current;
}