// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual I/O topology
 *
 * The Virtual I/O Translation Table (VIOT) describes the topology of
 * para-virtual IOMMUs and the endpoints they manage. The OS uses it to
 * initialize devices in the right order, preventing endpoints from issuing DMA
 * before their IOMMU is ready.
 *
 * When binding a driver to a device, before calling the device driver's probe()
 * method, the driver infrastructure calls dma_configure(). At that point the
 * VIOT driver looks for an IOMMU associated to the device in the VIOT table.
 * If an IOMMU exists and has been initialized, the VIOT driver initializes the
 * device's IOMMU fwspec, allowing the DMA infrastructure to invoke the IOMMU
 * ops when the device driver configures DMA mappings. If an IOMMU exists but
 * hasn't yet been initialized, VIOT returns -EPROBE_DEFER to postpone probing
 * the device until the IOMMU is available.
 */
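/*
 * Approximate call path (a sketch; the exact driver-core sequence varies
 * between kernel versions):
 *
 *   driver core probe
 *     -> dma_configure()
 *       -> acpi_dma_configure()
 *         -> viot_iommu_configure()		[this file]
 *           -> viot_dev_iommu_init()
 *             -> acpi_iommu_fwspec_init()
 */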
#define pr_fmt(fmt) "ACPI: VIOT: " fmt

#include <linux/acpi_viot.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
struct viot_iommu {
	/* Node offset within the table */
	unsigned int		offset;
	struct fwnode_handle	*fwnode;
	struct list_head	list;
};

struct viot_endpoint {
	union {
		/* PCI range */
		struct {
			u16	segment_start;
			u16	segment_end;
			u16	bdf_start;
			u16	bdf_end;
		};
		/* MMIO */
		u64		address;
	};
	u32			endpoint_id;
	struct viot_iommu	*viommu;
	struct list_head	list;
};
static struct acpi_table_viot *viot;
static LIST_HEAD(viot_iommus);
static LIST_HEAD(viot_pci_ranges);
static LIST_HEAD(viot_mmio_endpoints);
static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
{
	struct acpi_viot_header *start, *end, *hdr_end;

	start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
			     max_t(size_t, sizeof(*viot), viot->node_offset));
	end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
	hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));

	if (hdr < start || hdr_end > end) {
		pr_err(FW_BUG "Node pointer overflows\n");
		return -EOVERFLOW;
	}
	if (hdr->length < sizeof(*hdr)) {
		pr_err(FW_BUG "Empty node\n");
		return -EINVAL;
	}
	return 0;
}
static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
					    u16 segment, u16 bdf)
{
	struct pci_dev *pdev;
	struct fwnode_handle *fwnode;

	pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
					   bdf & 0xff);
	if (!pdev) {
		pr_err("Could not find PCI IOMMU\n");
		return -ENODEV;
	}

	fwnode = dev_fwnode(&pdev->dev);
	if (!fwnode) {
		/*
		 * PCI devices aren't necessarily described by ACPI. Create a
		 * fwnode so the IOMMU subsystem can identify this device.
		 */
		fwnode = acpi_alloc_fwnode_static();
		if (!fwnode) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		set_primary_fwnode(&pdev->dev, fwnode);
	}
	viommu->fwnode = dev_fwnode(&pdev->dev);
	pci_dev_put(pdev);
	return 0;
}
static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
					     u64 address)
{
	struct acpi_device *adev;
	struct resource res = {
		.start	= address,
		.end	= address,
		.flags	= IORESOURCE_MEM,
	};

	adev = acpi_resource_consumer(&res);
	if (!adev) {
		pr_err("Could not find MMIO IOMMU\n");
		return -EINVAL;
	}
	viommu->fwnode = &adev->fwnode;
	return 0;
}
static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
{
	int ret;
	struct viot_iommu *viommu;
	struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
						    viot, offset);
	union {
		struct acpi_viot_virtio_iommu_pci pci;
		struct acpi_viot_virtio_iommu_mmio mmio;
	} *node = (void *)hdr;

	list_for_each_entry(viommu, &viot_iommus, list)
		if (viommu->offset == offset)
			return viommu;

	if (viot_check_bounds(hdr))
		return NULL;

	viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return NULL;

	viommu->offset = offset;
	switch (hdr->type) {
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
		if (hdr->length < sizeof(node->pci))
			goto err_free;

		ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
						node->pci.bdf);
		break;
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
		if (hdr->length < sizeof(node->mmio))
			goto err_free;

		ret = viot_get_mmio_iommu_fwnode(viommu,
						 node->mmio.base_address);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_free;

	list_add(&viommu->list, &viot_iommus);
	return viommu;

err_free:
	kfree(viommu);
	return NULL;
}
static int __init viot_parse_node(const struct acpi_viot_header *hdr)
{
	int ret = -EINVAL;
	struct list_head *list;
	struct viot_endpoint *ep;
	union {
		struct acpi_viot_mmio mmio;
		struct acpi_viot_pci_range pci;
	} *node = (void *)hdr;

	if (viot_check_bounds(hdr))
		return -EINVAL;

	if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
	    hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
		return 0;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	switch (hdr->type) {
	case ACPI_VIOT_NODE_PCI_RANGE:
		if (hdr->length < sizeof(node->pci)) {
			pr_err(FW_BUG "Invalid PCI node size\n");
			goto err_free;
		}

		ep->segment_start = node->pci.segment_start;
		ep->segment_end = node->pci.segment_end;
		ep->bdf_start = node->pci.bdf_start;
		ep->bdf_end = node->pci.bdf_end;
		ep->endpoint_id = node->pci.endpoint_start;
		ep->viommu = viot_get_iommu(node->pci.output_node);
		list = &viot_pci_ranges;
		break;
	case ACPI_VIOT_NODE_MMIO:
		if (hdr->length < sizeof(node->mmio)) {
			pr_err(FW_BUG "Invalid MMIO node size\n");
			goto err_free;
		}

		ep->address = node->mmio.base_address;
		ep->endpoint_id = node->mmio.endpoint;
		ep->viommu = viot_get_iommu(node->mmio.output_node);
		list = &viot_mmio_endpoints;
		break;
	default:
		pr_warn("Unsupported node %x\n", hdr->type);
		ret = 0;
		goto err_free;
	}

	if (!ep->viommu) {
		pr_warn("No IOMMU node found\n");
		/*
		 * A future version of the table may use the node for other
		 * purposes. Keep parsing.
		 */
		ret = 0;
		goto err_free;
	}

	list_add(&ep->list, list);
	return 0;

err_free:
	kfree(ep);
	return ret;
}
/**
 * acpi_viot_early_init - Test the presence of VIOT and enable ACS
 *
 * If the VIOT does exist, ACS must be enabled. This cannot be done in
 * acpi_viot_init(), which is called after the PCI bus scan.
 */
void __init acpi_viot_early_init(void)
{
#ifdef CONFIG_PCI
	acpi_status status;
	struct acpi_table_header *hdr;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status))
		return;
	pci_request_acs();
	acpi_put_table(hdr);
#endif
}
/**
 * acpi_viot_init - Parse the VIOT table
 *
 * Parse the VIOT table, prepare the list of endpoints to be used during DMA
 * setup of devices.
 */
void __init acpi_viot_init(void)
{
	int i;
	acpi_status status;
	struct acpi_table_header *hdr;
	struct acpi_viot_header *node;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}
		return;
	}

	viot = (void *)hdr;

	node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
	for (i = 0; i < viot->node_count; i++) {
		if (viot_parse_node(node))
			return;

		node = ACPI_ADD_PTR(struct acpi_viot_header, node,
				    node->length);
	}

	acpi_put_table(hdr);
}
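/*
 * Illustrative VIOT layout (example offsets and values, not from any real
 * firmware table):
 *
 *   offset 0x00: acpi_table_viot header (node_count = 2, node_offset = 0x30)
 *   offset 0x30: PCI range node, bdf_start..bdf_end, output_node = 0x50
 *   offset 0x50: virtio-iommu PCI node, segment 0, bdf 00:01.0
 *
 * acpi_viot_init() walks the nodes starting at node_offset; viot_parse_node()
 * files each endpoint node on a list and resolves its output_node through
 * viot_get_iommu().
 */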
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
			       u32 epid)
{
	if (!viommu || !IS_ENABLED(CONFIG_VIRTIO_IOMMU))
		return -ENODEV;

	/* We're not translating ourselves */
	if (device_match_fwnode(dev, viommu->fwnode))
		return -EINVAL;

	return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode);
}
static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
{
	u32 epid;
	struct viot_endpoint *ep;
	struct device *aliased_dev = data;
	u32 domain_nr = pci_domain_nr(pdev->bus);

	list_for_each_entry(ep, &viot_pci_ranges, list) {
		if (domain_nr >= ep->segment_start &&
		    domain_nr <= ep->segment_end &&
		    dev_id >= ep->bdf_start &&
		    dev_id <= ep->bdf_end) {
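			/*
			 * The endpoint ID grows linearly with the BDF within
			 * the range, with the segment offset in the upper 16
			 * bits. Worked example (illustrative values only):
			 * with segment_start = 0, bdf_start = 0x08 and
			 * endpoint_id = 4, a function with dev_id 0x0a gets
			 * epid = 0x0a - 0x08 + 4 = 6.
			 */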
			epid = ((domain_nr - ep->segment_start) << 16) +
				dev_id - ep->bdf_start + ep->endpoint_id;

			return viot_dev_iommu_init(aliased_dev, ep->viommu,
						   epid);
		}
	}
	return -ENODEV;
}
static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
{
	struct resource *mem;
	struct viot_endpoint *ep;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	list_for_each_entry(ep, &viot_mmio_endpoints, list) {
		if (ep->address == mem->start)
			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
						   ep->endpoint_id);
	}
	return -ENODEV;
}
/**
 * viot_iommu_configure - Setup IOMMU ops for an endpoint described by VIOT
 * @dev: the endpoint
 *
 * Return: 0 on success, <0 on failure
 */
int viot_iommu_configure(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_for_each_dma_alias(to_pci_dev(dev),
					      viot_pci_dev_iommu_init, dev);
	else if (dev_is_platform(dev))
		return viot_mmio_dev_iommu_init(to_platform_device(dev));
	return -ENODEV;
}