1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 #include <acpi/acpigen_pci.h>
5 #include <device/pci.h>
6 #include <device/pci_ids.h>
7 #include <soc/pci_devs.h>
8 #include <intelblocks/acpi.h>
9 #include <intelblocks/vtd.h>
11 #include <soc/chip_common.h>
12 #include <soc/soc_util.h>
/*
 * Map a PCI domain device to the FSP HOB stack resource descriptor that
 * backs it. The domain id packs socket and stack indices (see
 * union xeon_domain_path), which select the entry inside the IIO_UDS HOB.
 * Must only be called on DEVICE_PATH_DOMAIN devices (asserted below).
 */
static const STACK_RES *domain_to_stack_res(const struct device *dev)
{
	assert(dev->path.type == DEVICE_PATH_DOMAIN);
	/* Decode socket/stack out of the packed domain id */
	const union xeon_domain_path dn = {
		.domain_path = dev_get_domain_id(dev)
	};

	const IIO_UDS *hob = get_iio_uds();

	return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
}
/*
 * read_resources() for a plain PCIe IIO domain: publish the IO, MMIO32 and
 * MMIO64 windows this stack decodes, as reported by the IIO_UDS HOB.
 * HOB limits are inclusive, hence the "limit + 1" when converting to the
 * exclusive upper bound that the *_window_from_to() helpers expect.
 */
static void iio_pci_domain_read_resources(struct device *dev)
{
	const STACK_RES *sr = domain_to_stack_res(dev);

	if (!sr)
		return;

	int index = 0;

	if (is_domain0(dev)) {
		/* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
		struct resource *res = new_resource(dev, index++);
		res->base = 0;
		res->size = 0x1000;
		res->limit = 0xfff;
		res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
	}

	/* base == limit would be a 1-unit window; the HOB reports such
	   (and empty) windows with base >= limit, so they are skipped. */
	if (sr->PciResourceIoBase < sr->PciResourceIoLimit)
		domain_io_window_from_to(dev, index++,
				sr->PciResourceIoBase, sr->PciResourceIoLimit + 1);

	if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit)
		domain_mem_window_from_to(dev, index++,
				sr->PciResourceMem32Base, sr->PciResourceMem32Limit + 1);

	if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit)
		domain_mem_window_from_to(dev, index++,
				sr->PciResourceMem64Base, sr->PciResourceMem64Limit + 1);

	/* Declare domain reserved MMIO */
	/* The VT-d BAR sits inside the MMIO32 window; reserve from the end
	   of the BAR up to the window limit so the allocator won't hand it out. */
	uint64_t reserved_mmio = sr->VtdBarAddress + vtd_probe_bar_size(pcidev_on_root(0, 0));
	if ((reserved_mmio >= sr->PciResourceMem32Base) &&
	    (reserved_mmio <= sr->PciResourceMem32Limit))
		mmio_range(dev, index++, reserved_mmio,
				sr->PciResourceMem32Limit - reserved_mmio + 1);
}
/*
 * Used by IIO stacks for PCIe bridges. Those contain 1 PCI host bridges,
 * all the bus numbers on the IIO stack can be used for this bridge
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources.
 */
static struct device_operations ubox_pcie_domain_ops = {
	/* No resources: noop read/set keeps the allocator away from UBOX */
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * Create one PCIe domain covering the stack's entire bus range
 * (BusBase..BusLimit) under the given upstream bus.
 */
static void create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
				const STACK_RES *sr, const size_t pci_segment_group)
{
	create_domain(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
		      &iio_pcie_domain_ops, pci_segment_group);
}
/*
 * On the first Xeon-SP generations there are no separate UBOX stacks,
 * and the UBOX devices reside on the first and second IIO. Starting
 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
 */
static void create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
				const STACK_RES *sr, const size_t pci_segment_group)
{
	/* Only expect 2 UBOX buses here */
	assert(sr->BusBase + 1 == sr->BusLimit);

	/* One single-bus domain per UBOX host bridge: UBX0 on BusBase,
	   UBX1 on BusLimit (== BusBase + 1, per the assert above). */
	create_domain(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
		      &ubox_pcie_domain_ops, pci_segment_group);
	create_domain(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
		      &ubox_pcie_domain_ops, pci_segment_group);
}
/*
 * Prototype is unconditional so create_xeonsp_domains() below compiles even
 * when CONFIG(SOC_INTEL_HAS_CXL) is disabled; the CONFIG() short-circuit in
 * the caller keeps the call dead code in that case. Definition is guarded.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group);
#if CONFIG(SOC_INTEL_HAS_CXL)
/*
 * read_resources() for a CXL domain: it gets the portion of each stack
 * window *below* the regular PCIe resources, i.e. from the stack base up to
 * (but not including) the PciResource*Base. Note the upper bounds here are
 * already exclusive — no "+ 1" as in the PCIe variant above.
 */
static void iio_cxl_domain_read_resources(struct device *dev)
{
	const STACK_RES *sr = domain_to_stack_res(dev);

	if (!sr)
		return;

	int index = 0;

	if (sr->IoBase < sr->PciResourceIoBase)
		domain_io_window_from_to(dev, index++,
				sr->IoBase, sr->PciResourceIoBase);

	if (sr->Mmio32Base < sr->PciResourceMem32Base)
		domain_mem_window_from_to(dev, index++,
				sr->Mmio32Base, sr->PciResourceMem32Base);

	if (sr->Mmio64Base < sr->PciResourceMem64Base)
		domain_mem_window_from_to(dev, index++,
				sr->Mmio64Base, sr->PciResourceMem64Base);
}
/* Domain ops for CXL 1.1 end-point domains (see read_resources above). */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * A CXL-capable stack is split into two domains: the first bus carries the
 * PCIe RCiEPs, the remaining buses carry CXL 1.1 end-points. Requires at
 * least two buses on the stack (asserted).
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group)
{
	assert(sr->BusBase + 1 <= sr->BusLimit);

	/* 1st domain contains PCIe RCiEPs */
	create_domain(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
		      &iio_pcie_domain_ops, pci_segment_group);
	/* 2nd domain contains CXL 1.1 end-points */
	create_domain(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
		      &iio_cxl_domain_ops, pci_segment_group);
}
#endif //CONFIG(SOC_INTEL_HAS_CXL)
/*
 * Dispatch on stack type and create the appropriate domain(s) for one
 * IIO/UBOX stack. Order matters: UBOX is checked first, then CXL (which
 * would also satisfy the plain PCIe predicate), then plain PCIe, then IOAT.
 * CONFIG() guards compile to constants, so disabled options fall through.
 */
void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
			   const STACK_RES *sr, const size_t pci_segment_group)
{
	if (is_ubox_stack_res(sr))
		create_ubox_domains(dp, bus, sr, pci_segment_group);
	else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(sr))
		create_cxl_domains(dp, bus, sr, pci_segment_group);
	else if (is_pcie_iio_stack_res(sr))
		create_pcie_domains(dp, bus, sr, pci_segment_group);
	else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(sr))
		create_ioat_domains(dp, bus, sr, pci_segment_group);
	/* NOTE(review): the file is cut off here; upstream may log or handle
	   an unmatched stack type after this chain — confirm against the
	   full source before relying on silent fall-through. */
}