1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 #include <console/console.h>
6 #include <cpu/x86/lapic_def.h>
7 #include <device/pci.h>
8 #include <device/pci_ids.h>
10 #include <soc/chip_common.h>
11 #include <soc/iomap.h>
12 #include <soc/pci_devs.h>
13 #include <soc/ramstage.h>
16 #include <security/intel/txt/txt_platform.h>
17 #include <security/intel/txt/txt.h>
18 #include <soc/config.h>
20 #include <soc/soc_util.h>
23 struct proximity_domains pds
= {
33 const char *description
;
42 /* NCMEM and ME ranges are mutually exclusive */
54 size_t vtd_probe_bar_size(struct device
*dev
)
56 uint32_t id
= pci_read_config32(dev
, PCI_VENDOR_ID
);
57 assert(id
== (PCI_VID_INTEL
| (MMAP_VTD_CFG_REG_DEVID
<< 16)));
59 uint32_t val
= pci_read_config32(dev
, VTD_BAR_CSR
);
60 pci_write_config32(dev
, VTD_BAR_CSR
, (uint32_t)(-4 * KiB
));
61 size_t size
= (~(pci_read_config32(dev
, VTD_BAR_CSR
) & ((uint32_t)(-4 * KiB
)))) + 1;
63 pci_write_config32(dev
, VTD_BAR_CSR
, val
);
68 static struct map_entry memory_map
[NUM_MAP_ENTRIES
] = {
69 [TOHM_REG
] = MAP_ENTRY_LIMIT_64(VTD_TOHM_CSR
, 26, "TOHM"),
70 [MMIOL_REG
] = MAP_ENTRY_BASE_32(VTD_MMIOL_CSR
, "MMIOL"),
71 [MMCFG_BASE_REG
] = MAP_ENTRY_BASE_64(VTD_MMCFG_BASE_CSR
, "MMCFG_BASE"),
72 [MMCFG_LIMIT_REG
] = MAP_ENTRY_LIMIT_64(VTD_MMCFG_LIMIT_CSR
, 26, "MMCFG_LIMIT"),
73 [TOLM_REG
] = MAP_ENTRY_LIMIT_32(VTD_TOLM_CSR
, 26, "TOLM"),
74 #if CONFIG(SOC_INTEL_HAS_NCMEM)
75 [NCMEM_BASE_REG
] = MAP_ENTRY_BASE_64(VTD_NCMEM_BASE_CSR
, "NCMEM_BASE"),
76 [NCMEM_LIMIT_REG
] = MAP_ENTRY_LIMIT_64(VTD_NCMEM_LIMIT_CSR
, 19, "NCMEM_LIMIT"),
78 [ME_BASE_REG
] = MAP_ENTRY_BASE_64(VTD_ME_BASE_CSR
, "ME_BASE"),
79 [ME_LIMIT_REG
] = MAP_ENTRY_LIMIT_64(VTD_ME_LIMIT_CSR
, 19, "ME_LIMIT"),
81 [TSEG_BASE_REG
] = MAP_ENTRY_BASE_32(VTD_TSEG_BASE_CSR
, "TSEGMB_BASE"),
82 [TSEG_LIMIT_REG
] = MAP_ENTRY_LIMIT_32(VTD_TSEG_LIMIT_CSR
, 20, "TSEGMB_LIMIT"),
83 [VTDBAR_REG
] = MAP_ENTRY_BASE_32(VTD_BAR_CSR
, "VTD_BAR"),
86 static void read_map_entry(struct device
*dev
, struct map_entry
*entry
,
96 if (entry
->reg
== VTD_BAR_CSR
&& !(pci_read_config32(dev
, entry
->reg
) & 1)) {
97 /* VTDBAR is not enabled */
102 mask
= ((1ULL << entry
->mask_bits
) - 1);
107 if (entry
->is_64_bit
) {
108 value
= pci_read_config32(dev
, entry
->reg
+ sizeof(uint32_t));
112 value
|= (uint64_t)pci_read_config32(dev
, entry
->reg
);
121 static void mc_read_map_entries(struct device
*dev
, uint64_t *values
)
124 for (i
= 0; i
< NUM_MAP_ENTRIES
; i
++)
125 read_map_entry(dev
, &memory_map
[i
], &values
[i
]);
128 static void mc_report_map_entries(struct device
*dev
, uint64_t *values
)
131 for (i
= 0; i
< NUM_MAP_ENTRIES
; i
++) {
132 if (!memory_map
[i
].description
)
135 printk(BIOS_DEBUG
, "%s: MC MAP: %s: 0x%llx\n",
136 dev_path(dev
), memory_map
[i
].description
, values
[i
]);
140 static void configure_dpr(struct device
*dev
)
142 const uintptr_t cbmem_top_mb
= ALIGN_UP(cbmem_top(), MiB
) / MiB
;
143 union dpr_register dpr
= { .raw
= pci_read_config32(dev
, VTD_LTDPR
) };
145 /* The DPR lock bit has to be set sufficiently early. It looks like
146 * it cannot be set anymore after FSP-S.
150 dpr
.size
= dpr
.top
- cbmem_top_mb
;
151 pci_write_config32(dev
, VTD_LTDPR
, dpr
.raw
);
154 #define MC_DRAM_RESOURCE_MMIO_HIGH 0x1000
155 #define MC_DRAM_RESOURCE_ANON_START 0x1001
160 * +--------------------------+ TOCM (2 pow 46 - 1)
162 * +--------------------------+
163 * | MMIOH (relocatable) |
164 * +--------------------------+
166 * +--------------------------+ TOHM
167 * | High DRAM Memory |
168 * +--------------------------+ 4GiB (0x100000000)
169 * +--------------------------+ 0xFFFF_FFFF
171 * +--------------------------+ 0xFF00_0000
173 * +--------------------------+ 0xFEF0_0000
175 * +--------------------------+ 0xFEE0_0000
176 * | HPET/LT/TPM/Others |
177 * +--------------------------+ 0xFED0_0000
179 * +--------------------------+ 0xFEC0_0000
181 * +--------------------------+ 0xFEB8_0000
183 * +--------------------------+ 0xFEB0_0000
185 * +--------------------------+ 0xFE00_0000
186 * | MMIOL (relocatable) |
187 * | P2SB PCR cfg BAR | (0xfd000000 - 0xfdffffff
188 * | BAR space | [mem 0x90000000-0xfcffffff] available for PCI devices
189 * +--------------------------+ 0x9000_0000
190 * |PCIe MMCFG (relocatable) | CONFIG_ECAM_MMCONF_BASE_ADDRESS 64 or 256MB
191 * | | (0x80000000 - 0x8fffffff, 0x40000)
192 * +--------------------------+ TOLM
193 * | MEseg (relocatable) | 32, 64, 128 or 256 MB (0x78000000 - 0x7fffffff, 0x20000)
194 * +--------------------------+
195 * | Tseg (relocatable) | N x 8MB (0x70000000 - 0x77ffffff, 0x20000)
196 * +--------------------------+
198 * +--------------------------+ 1M aligned DPR base
200 * +--------------------------+ cbmem_top
201 * | Reserved - CBMEM | (0x6fffe000 - 0x6fffffff, 0x2000)
202 * +--------------------------+
203 * | Reserved - FSP | (0x6fbfe000 - 0x6fffdfff, 0x400000)
204 * +--------------------------+ top_of_ram (0x6fbfdfff)
205 * | Low DRAM Memory |
206 * +--------------------------+ FFFFF (1MB)
208 * +--------------------------+ E0000
210 * +--------------------------+ C0000
211 * | VGA & SMM Memory |
212 * +--------------------------+ A0000
213 * | Conventional Memory |
215 * +--------------------------+ 0
218 static void mc_add_dram_resources(struct device
*dev
, int *res_count
)
220 const struct resource
*res
;
221 uint64_t mc_values
[NUM_MAP_ENTRIES
];
223 int index
= *res_count
;
224 struct range_entry fsp_mem
;
226 /* Read in the MAP registers and report their values. */
227 mc_read_map_entries(dev
, &mc_values
[0]);
228 mc_report_map_entries(dev
, &mc_values
[0]);
230 if (mc_values
[VTDBAR_REG
]) {
231 res
= mmio_range(dev
, VTD_BAR_CSR
, mc_values
[VTDBAR_REG
],
232 vtd_probe_bar_size(dev
));
233 LOG_RESOURCE("vtd_bar", dev
, res
);
236 /* Only add dram resources once. */
237 if (dev
->upstream
->secondary
!= 0 || dev
->upstream
->segment_group
!= 0)
240 /* Conventional Memory (DOS region, 0x0 to 0x9FFFF) */
241 res
= ram_from_to(dev
, index
++, 0, 0xa0000);
242 LOG_RESOURCE("legacy_ram", dev
, res
);
244 /* 1MB -> top_of_ram */
245 fsp_find_reserved_memory(&fsp_mem
);
246 top_of_ram
= range_entry_base(&fsp_mem
) - 1;
247 res
= ram_from_to(dev
, index
++, 1 * MiB
, top_of_ram
);
248 LOG_RESOURCE("low_ram", dev
, res
);
250 /* top_of_ram -> cbmem_top */
251 res
= ram_from_to(dev
, index
++, top_of_ram
, cbmem_top());
252 LOG_RESOURCE("cbmem_ram", dev
, res
);
254 /* Mark TSEG/SMM region as reserved */
255 res
= reserved_ram_from_to(dev
, index
++, mc_values
[TSEG_BASE_REG
],
256 mc_values
[TSEG_LIMIT_REG
] + 1);
257 LOG_RESOURCE("mmio_tseg", dev
, res
);
259 /* Reserve DPR region */
260 union dpr_register dpr
= { .raw
= pci_read_config32(dev
, VTD_LTDPR
) };
263 * cbmem_top -> DPR base:
264 * DPR has a 1M granularity so it's possible if cbmem_top is not 1M
265 * aligned that some memory does not get marked as assigned.
267 res
= reserved_ram_from_to(dev
, index
++, cbmem_top(),
268 (dpr
.top
- dpr
.size
) * MiB
);
269 LOG_RESOURCE("unused_dram", dev
, res
);
271 /* DPR base -> DPR top */
272 res
= reserved_ram_from_to(dev
, index
++, (dpr
.top
- dpr
.size
) * MiB
,
274 LOG_RESOURCE("dpr", dev
, res
);
277 /* Mark TSEG/SMM region as reserved */
278 res
= reserved_ram_from_to(dev
, index
++, mc_values
[TSEG_BASE_REG
],
279 mc_values
[TSEG_LIMIT_REG
] + 1);
280 LOG_RESOURCE("mmio_tseg", dev
, res
);
282 /* Mark region between TSEG - TOLM (eg. MESEG) as reserved */
283 res
= reserved_ram_from_to(dev
, index
++, mc_values
[TSEG_LIMIT_REG
] + 1,
284 mc_values
[TOLM_REG
]);
285 LOG_RESOURCE("mmio_tolm", dev
, res
);
288 const struct SystemMemoryMapHob
*mm
= get_system_memory_map();
290 for (int i
= 0; i
< mm
->numberEntries
; i
++) {
291 const struct SystemMemoryMapElement
*e
= &mm
->Element
[i
];
292 uint64_t addr
= ((uint64_t)e
->BaseAddress
<< MEM_ADDR_64MB_SHIFT_BITS
);
293 uint64_t size
= ((uint64_t)e
->ElementSize
<< MEM_ADDR_64MB_SHIFT_BITS
);
294 if (addr
< 4ULL * GiB
)
296 if (!is_memtype_processor_attached(e
->Type
))
298 if (is_memtype_reserved(e
->Type
))
301 res
= ram_range(dev
, index
++, addr
, size
);
302 LOG_RESOURCE("high_ram", dev
, res
);
305 if (CONFIG(SOC_INTEL_HAS_CXL
)) {
308 for (i
= 0; i
< pds
.num_pds
; i
++) {
309 if (pds
.pds
[i
].pd_type
!= PD_TYPE_GENERIC_INITIATOR
)
312 unsigned long flags
= IORESOURCE_CACHEABLE
;
313 int cxl_mode
= get_cxl_mode();
314 if (cxl_mode
== XEONSP_CXL_SP_MEM
)
315 flags
|= IORESOURCE_SOFT_RESERVE
;
317 flags
|= IORESOURCE_STORED
;
319 res
= fixed_mem_range_flags(dev
, index
++,
320 (uint64_t)pds
.pds
[i
].base
<< 26,
321 (uint64_t)pds
.pds
[i
].size
<< 26, flags
);
322 if (cxl_mode
== XEONSP_CXL_SP_MEM
)
323 LOG_RESOURCE("specific_purpose_memory", dev
, res
);
325 LOG_RESOURCE("CXL_memory", dev
, res
);
329 /* add MMIO CFG resource */
330 res
= mmio_from_to(dev
, index
++, mc_values
[MMCFG_BASE_REG
],
331 mc_values
[MMCFG_LIMIT_REG
] + 1);
332 LOG_RESOURCE("mmiocfg_res", dev
, res
);
334 /* add Local APIC resource */
335 res
= mmio_range(dev
, index
++, LAPIC_DEFAULT_BASE
, 0x00001000);
336 LOG_RESOURCE("apic_res", dev
, res
);
339 * Add legacy region as reserved - 0xa000 - 1MB
340 * Reserve everything between A segment and 1MB:
342 * 0xa0000 - 0xbffff: legacy VGA
343 * 0xc0000 - 0xfffff: RAM
345 res
= mmio_range(dev
, index
++, VGA_MMIO_BASE
, VGA_MMIO_SIZE
);
346 LOG_RESOURCE("legacy_mmio", dev
, res
);
348 res
= reserved_ram_from_to(dev
, index
++, 0xc0000, 1 * MiB
);
349 LOG_RESOURCE("legacy_write_protect", dev
, res
);
354 static void mmapvtd_read_resources(struct device
*dev
)
356 int index
= MC_DRAM_RESOURCE_ANON_START
;
358 /* Read standard PCI resources. */
359 pci_dev_read_resources(dev
);
364 /* Calculate and add DRAM resources. */
365 mc_add_dram_resources(dev
, &index
);
368 static void mmapvtd_set_resources(struct device
*dev
)
371 * The MMIO high window has to be added in set_resources() instead of
372 * read_resources(). Because adding in read_resources() would cause the
373 * whole window to be reserved, and it couldn't be used for resource
376 if (is_domain0(dev
->upstream
->dev
)) {
377 resource_t mmio64_base
, mmio64_size
;
378 if (get_mmio_high_base_size(&mmio64_base
, &mmio64_size
)) {
379 assert(!probe_resource(dev
, MC_DRAM_RESOURCE_MMIO_HIGH
));
380 fixed_mem_range_flags(dev
, MC_DRAM_RESOURCE_MMIO_HIGH
,
381 mmio64_base
, mmio64_size
, IORESOURCE_STORED
);
385 pci_dev_set_resources(dev
);
/* init() hook: nothing to do for this device. */
static void mmapvtd_init(struct device *dev)
{
}
392 static struct device_operations mmapvtd_ops
= {
393 .read_resources
= mmapvtd_read_resources
,
394 .set_resources
= mmapvtd_set_resources
,
395 .enable_resources
= pci_dev_enable_resources
,
396 .init
= mmapvtd_init
,
397 .ops_pci
= &soc_pci_ops
,
400 static const unsigned short mmapvtd_ids
[] = {
401 MMAP_VTD_CFG_REG_DEVID
, /* Memory Map/IntelĀ® VT-d Configuration Registers */
405 static const struct pci_driver mmapvtd_driver __pci_driver
= {
407 .vendor
= PCI_VID_INTEL
,
408 .devices
= mmapvtd_ids
411 #if !CONFIG(SOC_INTEL_MMAPVTD_ONLY_FOR_DPR)
412 static void vtd_read_resources(struct device
*dev
)
414 pci_dev_read_resources(dev
);
419 static struct device_operations vtd_ops
= {
420 .read_resources
= vtd_read_resources
,
421 .set_resources
= pci_dev_set_resources
,
422 .enable_resources
= pci_dev_enable_resources
,
423 .ops_pci
= &soc_pci_ops
,
426 /* VTD devices on other stacks */
427 static const struct pci_driver vtd_driver __pci_driver
= {
429 .vendor
= PCI_VID_INTEL
,
430 .device
= MMAP_VTD_STACK_CFG_REG_DEVID
,
434 static void dmi3_init(struct device
*dev
)
436 if (CONFIG(INTEL_TXT
) && skip_intel_txt_lockdown())
438 /* Disable error injection */
439 pci_or_config16(dev
, ERRINJCON
, 1 << 0);
442 * DMIRCBAR registers are not TXT lockable, but the BAR enable
443 * bit is. TXT requires that DMIRCBAR be disabled for security.
445 pci_and_config32(dev
, DMIRCBAR
, ~(1 << 0));
448 static struct device_operations dmi3_ops
= {
449 .read_resources
= pci_dev_read_resources
,
450 .set_resources
= pci_dev_set_resources
,
451 .enable_resources
= pci_dev_enable_resources
,
453 .ops_pci
= &soc_pci_ops
,
456 static const struct pci_driver dmi3_driver __pci_driver
= {
458 .vendor
= PCI_VID_INTEL
,
459 .device
= DMI3_DEVID
,
462 static void iio_dfx_global_init(struct device
*dev
)
464 if (CONFIG(INTEL_TXT
) && skip_intel_txt_lockdown())
468 pci_or_config16(dev
, IIO_DFX_LCK_CTL
, 0x3ff);
469 reg16
= pci_read_config16(dev
, IIO_DFX_TSWCTL0
);
470 reg16
&= ~(1 << 4); // allow ib mmio cfg
471 reg16
&= ~(1 << 5); // ignore acs p2p ma lpbk
472 reg16
|= (1 << 3); // me disable
473 pci_write_config16(dev
, IIO_DFX_TSWCTL0
, reg16
);
/* PCI device IDs matched by iio_dfx_global_driver.
 * NOTE(review): the ID entries (and the closing brace/0 terminator expected
 * by the .devices convention) are not visible in this view — confirm against
 * the full file. */
static const unsigned short iio_dfx_global_ids
[] = {
482 static struct device_operations iio_dfx_global_ops
= {
483 .read_resources
= pci_dev_read_resources
,
484 .set_resources
= pci_dev_set_resources
,
485 .enable_resources
= pci_dev_enable_resources
,
486 .init
= iio_dfx_global_init
,
487 .ops_pci
= &soc_pci_ops
,
490 static const struct pci_driver iio_dfx_global_driver __pci_driver
= {
491 .ops
= &iio_dfx_global_ops
,
492 .vendor
= PCI_VID_INTEL
,
493 .devices
= iio_dfx_global_ids
,