soc/intel/xeon_sp: Drop unused code
[coreboot2.git] / src / soc / intel / xeon_sp / uncore.c
blob1d77675aeb5285869d90b79acbc0e61faf9a9518
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 #include <arch/vga.h>
4 #include <cbmem.h>
5 #include <console/console.h>
6 #include <cpu/x86/lapic_def.h>
7 #include <cpu/x86/mtrr.h>
8 #include <device/pci.h>
9 #include <device/pci_ids.h>
10 #include <intelblocks/msr.h>
11 #include <soc/acpi.h>
12 #include <soc/chip_common.h>
13 #include <soc/iomap.h>
14 #include <soc/pci_devs.h>
15 #include <soc/ramstage.h>
16 #include <soc/util.h>
17 #include <fsp/util.h>
18 #include <security/intel/txt/txt_platform.h>
19 #include <security/intel/txt/txt.h>
20 #include <soc/config.h>
21 #include <soc/numa.h>
22 #include <soc/soc_util.h>
23 #include <stdint.h>
25 struct proximity_domains pds = {
26 .num_pds = 0,
27 .pds = NULL,
/*
 * Describes one address-map CSR of the memory-map/VT-d PCI function:
 * which config-space register to read, its width, whether it encodes a
 * limit (inclusive top address) rather than a base, and how many low
 * address bits are not implemented in the register.
 */
struct map_entry {
	uint32_t reg;		/* PCI config offset of the CSR; 0 = entry unused */
	int is_64_bit;		/* nonzero: read reg and reg+4 as one 64-bit value */
	int is_limit;		/* nonzero: value is a limit; low bits filled with 1s */
	int mask_bits;		/* count of low address bits masked out of the raw value */
	const char *description;	/* name printed by mc_report_map_entries() */
};
/* Indices into memory_map[] / the mc_values[] array read from it. */
enum {
	TOHM_REG,
	MMIOL_REG,
	MMCFG_BASE_REG,
	MMCFG_LIMIT_REG,
	TOLM_REG,
	/* NCMEM and ME ranges are mutually exclusive */
	NCMEM_BASE_REG,
	NCMEM_LIMIT_REG,
	ME_BASE_REG,
	ME_LIMIT_REG,
	TSEG_BASE_REG,
	TSEG_LIMIT_REG,
	VTDBAR_REG,
	/* Must be last. */
	NUM_MAP_ENTRIES
};
56 size_t vtd_probe_bar_size(struct device *dev)
58 uint32_t id = pci_read_config32(dev, PCI_VENDOR_ID);
59 assert(id == (PCI_VID_INTEL | (MMAP_VTD_CFG_REG_DEVID << 16)));
61 uint32_t val = pci_read_config32(dev, VTD_BAR_CSR);
62 pci_write_config32(dev, VTD_BAR_CSR, (uint32_t)(-4 * KiB));
63 size_t size = (~(pci_read_config32(dev, VTD_BAR_CSR) & ((uint32_t)(-4 * KiB)))) + 1;
64 assert(size != 0);
65 pci_write_config32(dev, VTD_BAR_CSR, val);
67 return size;
70 static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
71 [TOHM_REG] = MAP_ENTRY_LIMIT_64(VTD_TOHM_CSR, 26, "TOHM"),
72 [MMIOL_REG] = MAP_ENTRY_BASE_32(VTD_MMIOL_CSR, "MMIOL"),
73 [MMCFG_BASE_REG] = MAP_ENTRY_BASE_64(VTD_MMCFG_BASE_CSR, "MMCFG_BASE"),
74 [MMCFG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_MMCFG_LIMIT_CSR, 26, "MMCFG_LIMIT"),
75 [TOLM_REG] = MAP_ENTRY_LIMIT_32(VTD_TOLM_CSR, 26, "TOLM"),
76 #if CONFIG(SOC_INTEL_HAS_NCMEM)
77 [NCMEM_BASE_REG] = MAP_ENTRY_BASE_64(VTD_NCMEM_BASE_CSR, "NCMEM_BASE"),
78 [NCMEM_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_NCMEM_LIMIT_CSR, 19, "NCMEM_LIMIT"),
79 #else
80 [ME_BASE_REG] = MAP_ENTRY_BASE_64(VTD_ME_BASE_CSR, "ME_BASE"),
81 [ME_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_ME_LIMIT_CSR, 19, "ME_LIMIT"),
82 #endif
83 [TSEG_BASE_REG] = MAP_ENTRY_BASE_32(VTD_TSEG_BASE_CSR, "TSEGMB_BASE"),
84 [TSEG_LIMIT_REG] = MAP_ENTRY_LIMIT_32(VTD_TSEG_LIMIT_CSR, 20, "TSEGMB_LIMIT"),
85 [VTDBAR_REG] = MAP_ENTRY_BASE_32(VTD_BAR_CSR, "VTD_BAR"),
88 static void read_map_entry(struct device *dev, struct map_entry *entry,
89 uint64_t *result)
91 uint64_t value;
92 uint64_t mask;
94 if (!entry->reg) {
95 *result = 0;
96 return;
98 if (entry->reg == VTD_BAR_CSR && !(pci_read_config32(dev, entry->reg) & 1)) {
99 /* VTDBAR is not enabled */
100 *result = 0;
101 return;
104 mask = ((1ULL << entry->mask_bits) - 1);
105 mask = ~mask;
107 value = 0;
109 if (entry->is_64_bit) {
110 value = pci_read_config32(dev, entry->reg + sizeof(uint32_t));
111 value <<= 32;
114 value |= (uint64_t)pci_read_config32(dev, entry->reg);
115 value &= mask;
117 if (entry->is_limit)
118 value |= ~mask;
120 *result = value;
123 static void mc_read_map_entries(struct device *dev, uint64_t *values)
125 int i;
126 for (i = 0; i < NUM_MAP_ENTRIES; i++)
127 read_map_entry(dev, &memory_map[i], &values[i]);
130 static void mc_report_map_entries(struct device *dev, uint64_t *values)
132 int i;
133 for (i = 0; i < NUM_MAP_ENTRIES; i++) {
134 if (!memory_map[i].description)
135 continue;
137 printk(BIOS_DEBUG, "%s: MC MAP: %s: 0x%llx\n",
138 dev_path(dev), memory_map[i].description, values[i]);
142 static void configure_dpr(struct device *dev)
144 const uintptr_t cbmem_top_mb = ALIGN_UP(cbmem_top(), MiB) / MiB;
145 union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) };
147 /* The DPR lock bit has to be set sufficiently early. It looks like
148 * it cannot be set anymore after FSP-S.
150 dpr.lock = 1;
151 dpr.epm = 1;
152 dpr.size = dpr.top - cbmem_top_mb;
153 pci_write_config32(dev, VTD_LTDPR, dpr.raw);
/* Fixed resource index for the 64-bit MMIO window added in mmapvtd_set_resources() */
#define MC_DRAM_RESOURCE_MMIO_HIGH 0x1000
/* First resource index handed to mc_add_dram_resources() in mmapvtd_read_resources() */
#define MC_DRAM_RESOURCE_ANON_START 0x1001
159 __weak unsigned int get_prmrr_count(void)
161 return 0x0;
164 static bool get_prmrr_region(unsigned int msr_addr, uint64_t *base, uint64_t *size)
166 /* Check if processor supports PRMRR */
167 msr_t msr1 = rdmsr(MTRR_CAP_MSR);
168 if (!(msr1.lo & MTRR_CAP_PRMRR)) {
169 printk(BIOS_ERR, "%s(): PRMRR is not supported.\n", __func__);
170 return false;
173 /* Mask out bits 0-11 to get the base address */
174 *base = msr_read(msr_addr) & ~((1 << RANGE_SHIFT) - 1);
176 uint64_t mask = msr_read(MSR_PRMRR_PHYS_MASK);
177 *size = calculate_var_mtrr_size(mask);
179 return (*base && *size);
/*
 * Host Memory Map:
 *
 * +--------------------------+ TOCM (2 pow 46 - 1)
 * | Reserved |
 * +--------------------------+
 * | MMIOH (relocatable) |
 * +--------------------------+
 * | PCISeg |
 * +--------------------------+ TOHM
 * | High DRAM Memory |
 * +--------------------------+ 4GiB (0x100000000)
 * +--------------------------+ 0xFFFF_FFFF
 * | Firmware |
 * +--------------------------+ 0xFF00_0000
 * | Reserved |
 * +--------------------------+ 0xFEF0_0000
 * | Local xAPIC |
 * +--------------------------+ 0xFEE0_0000
 * | HPET/LT/TPM/Others |
 * +--------------------------+ 0xFED0_0000
 * | I/O xAPIC |
 * +--------------------------+ 0xFEC0_0000
 * | Reserved |
 * +--------------------------+ 0xFEB8_0000
 * | Reserved |
 * +--------------------------+ 0xFEB0_0000
 * | Reserved |
 * +--------------------------+ 0xFE00_0000
 * | MMIOL (relocatable) |
 * | P2SB PCR cfg BAR | (0xfd000000 - 0xfdffffff
 * | BAR space | [mem 0x90000000-0xfcffffff] available for PCI devices
 * +--------------------------+ 0x9000_0000
 * |PCIe MMCFG (relocatable) | CONFIG_ECAM_MMCONF_BASE_ADDRESS 64 or 256MB
 * | | (0x80000000 - 0x8fffffff, 0x40000)
 * +--------------------------+ TOLM
 * | MEseg (relocatable) | 32, 64, 128 or 256 MB (0x78000000 - 0x7fffffff, 0x20000)
 * +--------------------------+
 * | Tseg (relocatable) | N x 8MB (0x70000000 - 0x77ffffff, 0x20000)
 * +--------------------------+
 * | DPR |
 * +--------------------------+ 1M aligned DPR base
 * | Unused memory |
 * +--------------------------+ cbmem_top
 * | Reserved - CBMEM | (0x6fffe000 - 0x6fffffff, 0x2000)
 * +--------------------------+
 * | Reserved - FSP | (0x6fbfe000 - 0x6fffdfff, 0x400000)
 * +--------------------------+ top_of_ram (0x6fbfdfff)
 * | Low DRAM Memory |
 * +--------------------------+ FFFFF (1MB)
 * | E & F segments |
 * +--------------------------+ E0000
 * | C & D segments |
 * +--------------------------+ C0000
 * | VGA & SMM Memory |
 * +--------------------------+ A0000
 * | Conventional Memory |
 * | (DOS Range) |
 * +--------------------------+ 0
 */
243 static void mc_add_dram_resources(struct device *dev, int *res_count)
245 const struct resource *res;
246 uint64_t mc_values[NUM_MAP_ENTRIES];
247 uint64_t top_of_ram;
248 int index = *res_count;
249 struct range_entry fsp_mem;
251 /* Read in the MAP registers and report their values. */
252 mc_read_map_entries(dev, &mc_values[0]);
253 mc_report_map_entries(dev, &mc_values[0]);
255 if (mc_values[VTDBAR_REG]) {
256 res = mmio_range(dev, VTD_BAR_CSR, mc_values[VTDBAR_REG],
257 vtd_probe_bar_size(dev));
258 LOG_RESOURCE("vtd_bar", dev, res);
261 /* Only add dram resources once. */
262 if (dev->upstream->secondary != 0 || dev->upstream->segment_group != 0)
263 return;
265 /* Conventional Memory (DOS region, 0x0 to 0x9FFFF) */
266 res = ram_from_to(dev, index++, 0, 0xa0000);
267 LOG_RESOURCE("legacy_ram", dev, res);
269 /* 1MB -> top_of_ram */
270 fsp_find_reserved_memory(&fsp_mem);
271 top_of_ram = range_entry_base(&fsp_mem) - 1;
272 res = ram_from_to(dev, index++, 1 * MiB, top_of_ram);
273 LOG_RESOURCE("low_ram", dev, res);
275 /* top_of_ram -> cbmem_top */
276 res = ram_from_to(dev, index++, top_of_ram, cbmem_top());
277 LOG_RESOURCE("cbmem_ram", dev, res);
279 /* Mark TSEG/SMM region as reserved */
280 res = reserved_ram_from_to(dev, index++, mc_values[TSEG_BASE_REG],
281 mc_values[TSEG_LIMIT_REG] + 1);
282 LOG_RESOURCE("mmio_tseg", dev, res);
284 /* Reserve DPR region */
285 union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) };
286 if (dpr.size) {
288 * cbmem_top -> DPR base:
289 * DPR has a 1M granularity so it's possible if cbmem_top is not 1M
290 * aligned that some memory does not get marked as assigned.
292 res = reserved_ram_from_to(dev, index++, cbmem_top(),
293 (dpr.top - dpr.size) * MiB);
294 LOG_RESOURCE("unused_dram", dev, res);
296 /* DPR base -> DPR top */
297 res = reserved_ram_from_to(dev, index++, (dpr.top - dpr.size) * MiB,
298 dpr.top * MiB);
299 LOG_RESOURCE("dpr", dev, res);
302 /* Mark TSEG/SMM region as reserved */
303 res = reserved_ram_from_to(dev, index++, mc_values[TSEG_BASE_REG],
304 mc_values[TSEG_LIMIT_REG] + 1);
305 LOG_RESOURCE("mmio_tseg", dev, res);
307 /* Mark region between TSEG - TOLM (eg. MESEG) as reserved */
308 res = reserved_ram_from_to(dev, index++, mc_values[TSEG_LIMIT_REG] + 1,
309 mc_values[TOLM_REG]);
310 LOG_RESOURCE("mmio_tolm", dev, res);
312 /* Add high RAM */
313 const struct SystemMemoryMapHob *mm = get_system_memory_map();
315 for (int i = 0; i < mm->numberEntries; i++) {
316 const struct SystemMemoryMapElement *e = &mm->Element[i];
317 uint64_t addr = ((uint64_t)e->BaseAddress << MEM_ADDR_64MB_SHIFT_BITS);
318 uint64_t size = ((uint64_t)e->ElementSize << MEM_ADDR_64MB_SHIFT_BITS);
319 if (addr < 4ULL * GiB)
320 continue;
321 if (!is_memtype_processor_attached(e->Type))
322 continue;
323 if (is_memtype_reserved(e->Type))
324 continue;
326 res = ram_range(dev, index++, addr, size);
327 LOG_RESOURCE("high_ram", dev, res);
330 uint64_t prmrr_base, prmrr_size;
331 for (unsigned int i = 0; i < get_prmrr_count(); i++) {
332 if (get_prmrr_region(MSR_PRMRR_BASE(i), &prmrr_base, &prmrr_size)) {
333 res = reserved_ram_range(dev, index++, prmrr_base, prmrr_size);
334 LOG_RESOURCE("prmrr", dev, res);
338 if (CONFIG(SOC_INTEL_HAS_CXL)) {
339 /* CXL Memory */
340 uint8_t i;
341 for (i = 0; i < pds.num_pds; i++) {
342 if (pds.pds[i].pd_type != PD_TYPE_GENERIC_INITIATOR)
343 continue;
345 unsigned long flags = IORESOURCE_CACHEABLE;
346 int cxl_mode = get_cxl_mode();
347 if (cxl_mode == XEONSP_CXL_SP_MEM)
348 flags |= IORESOURCE_SOFT_RESERVE;
349 else
350 flags |= IORESOURCE_STORED;
352 res = fixed_mem_range_flags(dev, index++,
353 (uint64_t)pds.pds[i].base << 26,
354 (uint64_t)pds.pds[i].size << 26, flags);
355 if (cxl_mode == XEONSP_CXL_SP_MEM)
356 LOG_RESOURCE("specific_purpose_memory", dev, res);
357 else
358 LOG_RESOURCE("CXL_memory", dev, res);
362 /* add MMIO CFG resource */
363 res = mmio_from_to(dev, index++, mc_values[MMCFG_BASE_REG],
364 mc_values[MMCFG_LIMIT_REG] + 1);
365 LOG_RESOURCE("mmiocfg_res", dev, res);
367 /* add Local APIC resource */
368 res = mmio_range(dev, index++, LAPIC_DEFAULT_BASE, 0x00001000);
369 LOG_RESOURCE("apic_res", dev, res);
372 * Add legacy region as reserved - 0xa000 - 1MB
373 * Reserve everything between A segment and 1MB:
375 * 0xa0000 - 0xbffff: legacy VGA
376 * 0xc0000 - 0xfffff: RAM
378 res = mmio_range(dev, index++, VGA_MMIO_BASE, VGA_MMIO_SIZE);
379 LOG_RESOURCE("legacy_mmio", dev, res);
381 res = reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB);
382 LOG_RESOURCE("legacy_write_protect", dev, res);
384 *res_count = index;
387 static void mmapvtd_read_resources(struct device *dev)
389 int index = MC_DRAM_RESOURCE_ANON_START;
391 /* Read standard PCI resources. */
392 pci_dev_read_resources(dev);
394 /* set up DPR */
395 configure_dpr(dev);
397 /* Calculate and add DRAM resources. */
398 mc_add_dram_resources(dev, &index);
401 static void mmapvtd_set_resources(struct device *dev)
404 * The MMIO high window has to be added in set_resources() instead of
405 * read_resources(). Because adding in read_resources() would cause the
406 * whole window to be reserved, and it couldn't be used for resource
407 * allocation.
409 if (is_domain0(dev->upstream->dev)) {
410 resource_t mmio64_base, mmio64_size;
411 if (get_mmio_high_base_size(&mmio64_base, &mmio64_size)) {
412 assert(!probe_resource(dev, MC_DRAM_RESOURCE_MMIO_HIGH));
413 fixed_mem_range_flags(dev, MC_DRAM_RESOURCE_MMIO_HIGH,
414 mmio64_base, mmio64_size, IORESOURCE_STORED);
418 pci_dev_set_resources(dev);
421 static void mmapvtd_init(struct device *dev)
425 static struct device_operations mmapvtd_ops = {
426 .read_resources = mmapvtd_read_resources,
427 .set_resources = mmapvtd_set_resources,
428 .enable_resources = pci_dev_enable_resources,
429 .init = mmapvtd_init,
430 .ops_pci = &soc_pci_ops,
433 static const unsigned short mmapvtd_ids[] = {
434 MMAP_VTD_CFG_REG_DEVID, /* Memory Map/IntelĀ® VT-d Configuration Registers */
438 static const struct pci_driver mmapvtd_driver __pci_driver = {
439 .ops = &mmapvtd_ops,
440 .vendor = PCI_VID_INTEL,
441 .devices = mmapvtd_ids
444 #if !CONFIG(SOC_INTEL_MMAPVTD_ONLY_FOR_DPR)
445 static void vtd_read_resources(struct device *dev)
447 pci_dev_read_resources(dev);
449 configure_dpr(dev);
452 static struct device_operations vtd_ops = {
453 .read_resources = vtd_read_resources,
454 .set_resources = pci_dev_set_resources,
455 .enable_resources = pci_dev_enable_resources,
456 .ops_pci = &soc_pci_ops,
459 /* VTD devices on other stacks */
460 static const struct pci_driver vtd_driver __pci_driver = {
461 .ops = &vtd_ops,
462 .vendor = PCI_VID_INTEL,
463 .device = MMAP_VTD_STACK_CFG_REG_DEVID,
465 #endif
467 static void dmi3_init(struct device *dev)
469 if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown())
470 return;
471 /* Disable error injection */
472 pci_or_config16(dev, ERRINJCON, 1 << 0);
475 * DMIRCBAR registers are not TXT lockable, but the BAR enable
476 * bit is. TXT requires that DMIRCBAR be disabled for security.
478 pci_and_config32(dev, DMIRCBAR, ~(1 << 0));
481 static struct device_operations dmi3_ops = {
482 .read_resources = pci_dev_read_resources,
483 .set_resources = pci_dev_set_resources,
484 .enable_resources = pci_dev_enable_resources,
485 .init = dmi3_init,
486 .ops_pci = &soc_pci_ops,
489 static const struct pci_driver dmi3_driver __pci_driver = {
490 .ops = &dmi3_ops,
491 .vendor = PCI_VID_INTEL,
492 .device = DMI3_DEVID,
495 static void iio_dfx_global_init(struct device *dev)
497 if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown())
498 return;
500 uint16_t reg16;
501 pci_or_config16(dev, IIO_DFX_LCK_CTL, 0x3ff);
502 reg16 = pci_read_config16(dev, IIO_DFX_TSWCTL0);
503 reg16 &= ~(1 << 4); // allow ib mmio cfg
504 reg16 &= ~(1 << 5); // ignore acs p2p ma lpbk
505 reg16 |= (1 << 3); // me disable
506 pci_write_config16(dev, IIO_DFX_TSWCTL0, reg16);
/* Zero-terminated device-ID list matched by iio_dfx_global_driver. */
static const unsigned short iio_dfx_global_ids[] = {
	0x202d,
	0x203d,
	0
};
515 static struct device_operations iio_dfx_global_ops = {
516 .read_resources = pci_dev_read_resources,
517 .set_resources = pci_dev_set_resources,
518 .enable_resources = pci_dev_enable_resources,
519 .init = iio_dfx_global_init,
520 .ops_pci = &soc_pci_ops,
523 static const struct pci_driver iio_dfx_global_driver __pci_driver = {
524 .ops = &iio_dfx_global_ops,
525 .vendor = PCI_VID_INTEL,
526 .devices = iio_dfx_global_ids,