/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <acpi/acpigen_pci.h>
#include <assert.h>
#include <console/console.h>
#include <device/pci.h>
#include <intelblocks/acpi.h>
#include <post.h>
#include <soc/acpi.h>
#include <soc/chip_common.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <stdlib.h>
static const STACK_RES *domain_to_stack_res(const struct device *dev)
{
	assert(dev->path.type == DEVICE_PATH_DOMAIN);
	const union xeon_domain_path dn = {
		.domain_path = dev->path.domain.domain
	};

	const IIO_UDS *hob = get_iio_uds();
	assert(hob != NULL);

	return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
}
/**
 * Find all devices of a given vendor and device ID for the specified socket.
 * The function iterates over all PCI domains of the specified socket
 * and matches the PCI vendor and device ID.
 *
 * @param socket The socket where to search for the device.
 * @param vendor A PCI vendor ID (e.g. 0x8086 for Intel).
 * @param device A PCI device ID.
 * @param from The device pointer to start the search from.
 *
 * @return Pointer to the device struct. When there are multiple device
 * instances, the caller should continue the search upon a non-NULL match.
 */
struct device *dev_find_all_devices_on_socket(uint8_t socket, u16 vendor, u16 device,
	struct device *from)
{
	return dev_find_all_devices_on_stack(socket, XEONSP_STACK_MAX, vendor, device, from);
}
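/*
 * Usage sketch (illustrative, not part of the driver): walk every matching
 * device on socket 0, feeding each non-NULL hit back in as 'from' to resume
 * the search. PCI_VID_INTEL comes from <device/pci_ids.h>; the device ID
 * 0x0b00 is hypothetical.
 *
 *	struct device *dev = NULL;
 *	while ((dev = dev_find_all_devices_on_socket(0, PCI_VID_INTEL, 0x0b00, dev)))
 *		printk(BIOS_DEBUG, "match: %s\n", dev_path(dev));
 */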
/**
 * Find a device of a given vendor and device ID for the specified socket.
 * The function returns on the first match.
 */
struct device *dev_find_device_on_socket(uint8_t socket, u16 vendor, u16 device)
{
	return dev_find_all_devices_on_socket(socket, vendor, device, NULL);
}
static int filter_device_on_stack(struct device *dev, uint8_t socket, uint8_t stack,
	u16 vendor, u16 device)
{
	struct device *domain = dev_get_pci_domain(dev);
	if (!domain)
		return 0;
	if (dev->path.type != DEVICE_PATH_PCI)
		return 0;

	union xeon_domain_path dn;
	dn.domain_path = domain->path.domain.domain;

	if (socket != XEONSP_SOCKET_MAX && dn.socket != socket)
		return 0;
	if (stack != XEONSP_STACK_MAX && dn.stack != stack)
		return 0;
	if (vendor != XEONSP_VENDOR_MAX && dev->vendor != vendor)
		return 0;
	if (device != XEONSP_DEVICE_MAX && dev->device != device)
		return 0;

	return 1;
}
/**
 * Find all devices of a given vendor and device ID for the specified socket and stack.
 *
 * @param socket The socket where to search for the device.
 *               XEONSP_SOCKET_MAX indicates any socket.
 * @param stack The stack where to search for the device.
 *              XEONSP_STACK_MAX indicates any stack.
 * @param vendor A PCI vendor ID (e.g. 0x8086 for Intel).
 *               XEONSP_VENDOR_MAX indicates any vendor.
 * @param device A PCI device ID.
 *               XEONSP_DEVICE_MAX indicates any device.
 * @param from The device pointer to start the search from.
 *
 * @return Pointer to the device struct. When there are multiple device
 * instances, the caller should continue the search upon a non-NULL match.
 */
struct device *dev_find_all_devices_on_stack(uint8_t socket, uint8_t stack,
	u16 vendor, u16 device, struct device *from)
{
	if (!from)
		from = all_devices;
	else
		from = from->next;

	while (from && (!filter_device_on_stack(from, socket, stack,
		vendor, device)))
		from = from->next;

	return from;
}
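/*
 * Usage sketch (illustrative): the XEONSP_*_MAX sentinels act as wildcards,
 * so a search can be narrowed to a single stack while matching any device ID.
 * The socket/stack numbers here are hypothetical.
 *
 *	struct device *dev = NULL;
 *	while ((dev = dev_find_all_devices_on_stack(1, 2, PCI_VID_INTEL,
 *						    XEONSP_DEVICE_MAX, dev)))
 *		printk(BIOS_DEBUG, "socket 1/stack 2: %s\n", dev_path(dev));
 */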
/**
 * Find all devices of a given vendor and device ID on the specified domain.
 * Only the direct children of the input domain are iterated.
 *
 * @param domain Pointer to the input domain.
 * @param vendor A PCI vendor ID.
 *               XEONSP_VENDOR_MAX indicates any vendor.
 * @param device A PCI device ID.
 *               XEONSP_DEVICE_MAX indicates any device.
 * @param from The device pointer to start the search from.
 *
 * @return Pointer to the device struct. When there are multiple device
 * instances, the caller should continue the search upon a non-NULL match.
 */
struct device *dev_find_all_devices_on_domain(struct device *domain, u16 vendor,
	u16 device, struct device *from)
{
	struct device *dev = from;
	while ((dev = dev_bus_each_child(domain->downstream, dev))) {
		if (vendor != XEONSP_VENDOR_MAX && dev->vendor != vendor)
			continue;
		if (device != XEONSP_DEVICE_MAX && dev->device != device)
			continue;
		break;
	}

	return dev;
}
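/*
 * Usage sketch (illustrative): unlike the socket/stack variants above, this
 * helper only visits the domain's direct children. 'domain' is assumed to be
 * a DEVICE_PATH_DOMAIN device obtained elsewhere.
 *
 *	struct device *child = NULL;
 *	while ((child = dev_find_all_devices_on_domain(domain, PCI_VID_INTEL,
 *						       XEONSP_DEVICE_MAX, child)))
 *		printk(BIOS_DEBUG, "child: %s\n", dev_path(child));
 */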
/**
 * Returns the socket ID that the specified device is connected to.
 * This is an integer in the range [0, CONFIG_MAX_SOCKET).
 *
 * @param dev The device to look up.
 *
 * @return Socket ID the device is attached to, negative number on error.
 */
int iio_pci_domain_socket_from_dev(struct device *dev)
{
	struct device *domain;
	union xeon_domain_path dn;

	if (dev->path.type == DEVICE_PATH_DOMAIN)
		domain = dev;
	else
		domain = dev_get_pci_domain(dev);

	if (!domain)
		return -1;

	dn.domain_path = domain->path.domain.domain;

	return dn.socket;
}
/**
 * Returns the stack ID that the specified device is connected to.
 * This is an integer in the range [0, MAX_IIO_STACK).
 *
 * @param dev The device to look up.
 *
 * @return Stack ID the device is attached to, negative number on error.
 */
int iio_pci_domain_stack_from_dev(struct device *dev)
{
	struct device *domain;
	union xeon_domain_path dn;

	if (dev->path.type == DEVICE_PATH_DOMAIN)
		domain = dev;
	else
		domain = dev_get_pci_domain(dev);

	if (!domain)
		return -1;

	dn.domain_path = domain->path.domain.domain;

	return dn.stack;
}
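/*
 * Usage sketch (illustrative): both lookups fail softly by returning a
 * negative number, so callers should check before using the values.
 *
 *	int socket = iio_pci_domain_socket_from_dev(dev);
 *	int stack = iio_pci_domain_stack_from_dev(dev);
 *	if (socket < 0 || stack < 0)
 *		printk(BIOS_ERR, "No IIO domain found for %s\n", dev_path(dev));
 */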
void iio_pci_domain_read_resources(struct device *dev)
{
	struct resource *res;
	const STACK_RES *sr = domain_to_stack_res(dev);

	if (!sr)
		return;

	int index = 0;

	if (is_domain0(dev)) {
		/* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
		res = new_resource(dev, index++);
		res->base = 0;
		res->size = 0x1000;
		res->limit = 0xfff;
		res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
	}

	if (sr->PciResourceIoBase < sr->PciResourceIoLimit) {
		res = new_resource(dev, index++);
		res->base = sr->PciResourceIoBase;
		res->limit = sr->PciResourceIoLimit;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
	}

	if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit) {
		res = new_resource(dev, index++);
		res->base = sr->PciResourceMem32Base;
		res->limit = sr->PciResourceMem32Limit;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}

	if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit) {
		res = new_resource(dev, index++);
		res->base = sr->PciResourceMem64Base;
		res->limit = sr->PciResourceMem64Limit;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
}
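/*
 * Worked example (hypothetical values): a stack reporting
 * PciResourceMem32Base = 0x90000000 and PciResourceMem32Limit = 0x9fffffff
 * yields one 32-bit MMIO window of size 0x9fffffff - 0x90000000 + 1 =
 * 0x10000000 (256 MiB) for this domain's resource allocator.
 */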
/*
 * Used by IIO stacks for PCIe bridges. Those contain one PCI host bridge;
 * all the bus numbers on the IIO stack can be used for this bridge.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
static void soc_create_domains(const union xeon_domain_path dp, struct bus *upstream,
	int bus_base, int bus_limit, const char *type,
	struct device_operations *ops,
	const size_t pci_segment_group)
{
	struct device_path path;
	init_xeon_domain_path(&path, dp.socket, dp.stack, bus_base);

	struct device *const domain = alloc_find_dev(upstream, &path);
	if (!domain)
		die("%s: out of memory.\n", __func__);

	domain->ops = ops;
	iio_domain_set_acpi_name(domain, type);

	struct bus *const bus = alloc_bus(domain);
	bus->secondary = bus_base;
	bus->subordinate = bus_base;
	bus->max_subordinate = bus_limit;
	bus->segment_group = pci_segment_group;
}
static void soc_create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
	const STACK_RES *sr, const size_t pci_segment_group)
{
	soc_create_domains(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
			   &iio_pcie_domain_ops, pci_segment_group);
}
/*
 * On the first Xeon-SP generations there are no separate UBOX stacks,
 * and the UBOX devices reside on the first and second IIO. Starting
 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
 */
static void soc_create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
	const STACK_RES *sr, const size_t pci_segment_group)
{
	/* Only expect 2 UBOX buses here */
	assert(sr->BusBase + 1 == sr->BusLimit);

	soc_create_domains(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
			   &ubox_pcie_domain_ops, pci_segment_group);
	soc_create_domains(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
			   &ubox_pcie_domain_ops, pci_segment_group);
}
#if CONFIG(SOC_INTEL_HAS_CXL)
void iio_cxl_domain_read_resources(struct device *dev)
{
	struct resource *res;
	const STACK_RES *sr = domain_to_stack_res(dev);

	if (!sr)
		return;

	int index = 0;

	/* Report the portions of the stack windows below the PCIe ranges */
	if (sr->IoBase < sr->PciResourceIoBase) {
		res = new_resource(dev, index++);
		res->base = sr->IoBase;
		res->limit = sr->PciResourceIoBase - 1;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
	}

	if (sr->Mmio32Base < sr->PciResourceMem32Base) {
		res = new_resource(dev, index++);
		res->base = sr->Mmio32Base;
		res->limit = sr->PciResourceMem32Base - 1;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}

	if (sr->Mmio64Base < sr->PciResourceMem64Base) {
		res = new_resource(dev, index++);
		res->base = sr->Mmio64Base;
		res->limit = sr->PciResourceMem64Base - 1;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
}
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
void soc_create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
	const STACK_RES *sr, const size_t pci_segment_group)
{
	assert(sr->BusBase + 1 <= sr->BusLimit);

	/* 1st domain contains PCIe RCiEPs */
	soc_create_domains(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
			   &iio_pcie_domain_ops, pci_segment_group);
	/* 2nd domain contains CXL 1.1 end-points */
	soc_create_domains(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
			   &iio_cxl_domain_ops, pci_segment_group);
}
#endif //CONFIG(SOC_INTEL_HAS_CXL)
/* Attach stacks as domains */
void attach_iio_stacks(void)
{
	const IIO_UDS *hob = get_iio_uds();
	union xeon_domain_path dn = { .domain_path = 0 };
	if (!hob)
		return;

	struct bus *root_bus = dev_root.downstream;
	for (int s = 0; s < CONFIG_MAX_SOCKET; ++s) {
		if (!soc_cpu_is_enabled(s))
			continue;
		for (int x = 0; x < MAX_LOGIC_IIO_STACK; ++x) {
			const STACK_RES *ri = &hob->PlatformData.IIO_resource[s].StackRes[x];
			const size_t seg = hob->PlatformData.CpuQpiInfo[s].PcieSegment;

			if (ri->BusBase > ri->BusLimit)
				continue;

			/* Prepare domain path */
			dn.socket = s;
			dn.stack = x;

			if (is_ubox_stack_res(ri))
				soc_create_ubox_domains(dn, root_bus, ri, seg);
			else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(ri))
				soc_create_cxl_domains(dn, root_bus, ri, seg);
			else if (is_pcie_iio_stack_res(ri))
				soc_create_pcie_domains(dn, root_bus, ri, seg);
			else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(ri))
				soc_create_ioat_domains(dn, root_bus, ri, seg);
		}
	}
}
bool is_pcie_domain(struct device *dev)
{
	if ((!dev) || (dev->path.type != DEVICE_PATH_DOMAIN))
		return false;

	return strstr(dev->name, DOMAIN_TYPE_PCIE);
}

bool is_ioat_domain(struct device *dev)
{
	if ((!dev) || (dev->path.type != DEVICE_PATH_DOMAIN))
		return false;

	return (strstr(dev->name, DOMAIN_TYPE_CPM0) ||
		strstr(dev->name, DOMAIN_TYPE_CPM1) ||
		strstr(dev->name, DOMAIN_TYPE_DINO) ||
		strstr(dev->name, DOMAIN_TYPE_HQM0) ||
		strstr(dev->name, DOMAIN_TYPE_HQM1));
}

bool is_ubox_domain(struct device *dev)
{
	if ((!dev) || (dev->path.type != DEVICE_PATH_DOMAIN))
		return false;

	return (strstr(dev->name, DOMAIN_TYPE_UBX0) ||
		strstr(dev->name, DOMAIN_TYPE_UBX1));
}

bool is_cxl_domain(struct device *dev)
{
	if ((!dev) || (dev->path.type != DEVICE_PATH_DOMAIN))
		return false;

	return strstr(dev->name, DOMAIN_TYPE_CXL);
}
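/*
 * Usage sketch (illustrative): the predicates classify a domain by the name
 * assigned in iio_domain_set_acpi_name(), e.g. to special-case UBOX domains
 * while walking the root bus.
 *
 *	struct device *d = NULL;
 *	while ((d = dev_bus_each_child(dev_root.downstream, d))) {
 *		if (is_ubox_domain(d))
 *			printk(BIOS_DEBUG, "UBOX domain: %s\n", dev_path(d));
 *	}
 */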