// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))
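
/*
 * Example (illustrative, not part of the original sources): these masks let
 * callers test a node's type with a single AND, e.g. a node of either SMMU
 * flavour satisfies
 *
 *	IORT_TYPE_MASK(node->type) & IORT_IOMMU_TYPE
 *
 * while an ITS group node satisfies the same test against IORT_MSI_TYPE.
 */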

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};

static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token along with related
 * ITS ID and base address to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev;
		struct acpi_iort_named_component *ncomp;
		struct device *nc_dev = dev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * IORT for a device matching a named component if
		 * the device does not have an ACPI companion to
		 * start from.
		 */
		do {
			adev = ACPI_COMPANION(nc_dev);
			if (adev)
				break;

			nc_dev = nc_dev->parent;
		} while (nc_dev);

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(nc_dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out, bool check_overlap)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in > map->input_base + map->id_count))
		return -ENXIO;

	if (check_overlap) {
		/*
		 * We already found a mapping for this input ID at the end of
		 * another region. If it coincides with the start of this
		 * region, we assume the prior match was due to the off-by-1
		 * issue mentioned below, and allow it to be superseded.
		 * Otherwise, things are *really* broken, and we just disregard
		 * duplicate matches entirely to retain compatibility.
		 */
		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
		       map, rid_in);
		if (rid_in != map->input_base)
			return -ENXIO;

		pr_err(FW_BUG "applying workaround.\n");
	}

	*rid_out = map->output_base + (rid_in - map->input_base);

	/*
	 * Due to confusion regarding the meaning of the id_count field (which
	 * carries the number of IDs *minus 1*), we may have to disregard this
	 * match if it is at the end of the range, and overlaps with the start
	 * of another one.
	 */
	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
		return -EAGAIN;
	return 0;
}
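
/*
 * Worked example (hypothetical values, for illustration only): a map with
 * input_base 0x0, id_count 0xff and output_base 0x1000 translates rid_in
 * 0x42 to 0x1000 + (0x42 - 0x0) = 0x1042. Because id_count holds the number
 * of IDs *minus one*, rid_in 0xff is the last input covered; if another map
 * happens to start at input_base 0xff, the -EAGAIN path above lets that map
 * supersede this match.
 */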

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
		    node->type == ACPI_IORT_NODE_PMCG) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	struct acpi_iort_pmcg *pmcg;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * SMMUv3 dev ID mapping index was introduced in revision 1
		 * table, not available in revision 0
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * ID mapping index is only ignored if all interrupts are
		 * GSIV based
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	case ACPI_IORT_NODE_PMCG:
		pmcg = (struct acpi_iort_pmcg *)node->node_data;
		if (pmcg->overflow_gsiv || node->mapping_count == 0)
			return -EINVAL;

		return 0;
	default:
		return -EINVAL;
	}
}
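
/*
 * Note (illustrative summary, not from the original sources): a revision 1+
 * SMMUv3 node that signals any of its interrupts via MSI must dedicate one
 * ID mapping to its own device ID; the index returned above identifies that
 * mapping so device ID translation can skip it. A negative return simply
 * means "no special mapping to skip".
 */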

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index, rc = 0;
		u32 out_ref = 0, map_id = id;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is special mapping index, skip it */
			if (i == index)
				continue;

			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
			if (!rc)
				break;
			if (rc == -EAGAIN)
				out_ref = map->output_reference;
		}

		if (i == node->mapping_count && !out_ref)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    rc ? out_ref : map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
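
/*
 * Sketch of the walk above (hypothetical topology, for illustration): for a
 * named component whose ID maps NC -> SMMUv3 -> ITS group, calling
 *
 *	iort_node_map_id(nc_node, id, &out, IORT_MSI_TYPE);
 *
 * translates the ID across the NC and SMMUv3 mapping arrays until the ITS
 * group node matches IORT_MSI_TYPE, whereas IORT_IOMMU_TYPE would stop the
 * walk one hop earlier, at the SMMUv3 node.
 */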

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: map the initial dev id if its parent is not
	 * the target type we want, map it again for the use cases such
	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * scan iort_fwnode_list to see if it's an iort platform
		 * device (such as an SMMU or PMCG); its iort node is already
		 * cached and associated with the fwnode when iort platform
		 * devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;
		/*
		 * if not, then it should be a platform device defined in
		 * DSDT/SSDT (with Named Component node in IORT)
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	pbus = to_pci_dev(dev)->bus;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_id() - Map a MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return input_id;

	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}
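
/*
 * Usage sketch (hypothetical, not taken from a caller in this file): mapping
 * a PCI requester ID to an ITS device ID could look like
 *
 *	u32 dev_id = iort_msi_map_id(&pdev->dev, pci_dev_id(pdev));
 *
 * With a root complex map of input_base 0x0 and output_base 0x10000,
 * requester ID 0x8 would yield dev_id 0x10008.
 */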

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device id was found, -ENODEV otherwise
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
					  enum irq_domain_bus_token bus_token)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, bus_token);
}

static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(struct device *dev)
{
	int err = 0;

	if (dev->bus && !device_iommu_mapped(dev))
		err = iommu_probe_device(dev);

	return err;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of msi reserved regions on success (0 if platform
 *          doesn't require the reservation or no associated msi regions),
 *          appropriate error value otherwise. The ITS interrupt translation
 *          spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
 *          are the msi reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i, resv = 0;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return 0;

	/*
	 * Current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */

	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					fwspec->ids[i],
					NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return 0;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI);
			if (region) {
				list_add_tail(&region->list, head);
				resv++;
			}
		}
	}

	return (resv == its->its_count) ? resv : -ENODEV;
}
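
/*
 * Note on the region above (assumption based on the GICv3 ITS register
 * layout): the 64K page at ITS_base + 64K holds the GITS_TRANSLATER register
 * that devices write to raise MSIs, which is why exactly
 * (base + SZ_64K, SZ_64K) is carved out as an IOMMU_RESV_MSI region for each
 * ITS the device can reach.
 */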

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_ENABLED(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built in the kernel;
	 * Depending on whether the SMMU drivers are built-in
	 * in the kernel or not, defer the IOMMU configuration
	 * or just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
		       -EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
				      struct acpi_iort_node *node)
{
	struct acpi_iort_named_component *nc;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec)
		return;

	nc = (struct acpi_iort_named_component *)node->node_data;
	fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
					   nc->node_flags);
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
	struct acpi_iort_node *parent;
	int err = -ENODEV, i = 0;
	u32 streamid = 0;

	do {
		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE,
						   i++);

		if (parent)
			err = iort_iommu_xlate(dev, parent, streamid);
	} while (parent && !err);

	return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
				struct acpi_iort_node *node,
				const u32 *in_id)
{
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
	if (parent)
		return iort_iommu_xlate(dev, parent, streamid);

	return -ENODEV;
}

/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
						const u32 *id_in)
{
	struct acpi_iort_node *node;
	const struct iommu_ops *ops;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);

		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && iort_pci_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
			      iort_nc_iommu_map(dev, node);

		if (!err)
			iort_named_component_init(dev, node);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev);
		err = iort_add_device_replay(dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}

#else
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
						const u32 *input_id)
{ return NULL; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}
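
/*
 * Example (illustrative): a named component with memory_address_limit 32
 * yields *size = 1ULL << 32, i.e. a 4GiB addressable range; a limit of 64 or
 * more saturates to U64_MAX, avoiding a shift past the width of the type.
 */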

static int rc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;
	struct pci_bus *pbus = to_pci_dev(dev)->bus;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	*size = rc->memory_address_limit >= 64 ? U64_MAX :
			1ULL << rc->memory_address_limit;

	return 0;
}

/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
	int ret;

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (dev->coherent_dma_mask)
		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
	else
		size = 1ULL << 32;

	ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
	if (ret == -ENODEV)
		ret = dev_is_pci(dev) ? rc_dma_get_range(dev, &size)
				      : nc_dma_get_range(dev, &size);

	if (!ret) {
		/*
		 * Limit coherent and dma mask based on size retrieved from
		 * firmware.
		 */
		end = dmaaddr + size - 1;
		mask = DMA_BIT_MASK(ilog2(end) + 1);
		dev->bus_dma_limit = end;
		dev->coherent_dma_mask = mask;
		*dev->dma_mask = mask;
	}

	*dma_addr = dmaaddr;
	*dma_size = size;

	ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);

	dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}
*smmu
)
1234 * Cavium ThunderX2 implementation doesn't not support unique
1235 * irq line. Use single irq line for all the SMMUv3 interrupts.
1237 if (smmu
->model
!= ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
)
1241 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
1244 return smmu
->event_gsiv
== smmu
->pri_gsiv
&&
1245 smmu
->event_gsiv
== smmu
->gerr_gsiv
&&
1246 smmu
->event_gsiv
== smmu
->sync_gsiv
;

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size, for Cavium ThunderX2 implementation
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
					    struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
	return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	/* Global IRQ */
	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static void __init arm_smmu_dma_configure(struct device *dev,
					  struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for SMMU set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	/*
	 * There are always 2 memory resources.
	 * If the overflow_gsiv is present then add that for a total of 3.
	 */
	return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
						   struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	res[0].start = pmcg->page0_base_address;
	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
	res[0].flags = IORESOURCE_MEM;
	res[1].start = pmcg->page1_base_address;
	res[1].end = pmcg->page1_base_address + SZ_4K - 1;
	res[1].flags = IORESOURCE_MEM;

	if (pmcg->overflow_gsiv)
		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
				       ACPI_EDGE_SENSITIVE, &res[2]);
}

static struct acpi_platform_list pmcg_plat_info[] __initdata = {
	/* HiSilicon Hip08 Platform */
	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
	{ }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
	u32 model;
	int idx;

	idx = acpi_match_platform_list(pmcg_plat_info);
	if (idx >= 0)
		model = pmcg_plat_info[idx].data;
	else
		model = IORT_SMMU_V3_PMCG_GENERIC;

	return platform_device_add_data(pdev, &model, sizeof(model));
}

struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	void (*dev_dma_configure)(struct device *dev,
				  struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				   struct acpi_iort_node *node);
	int (*dev_set_proximity)(struct device *dev,
				 struct acpi_iort_node *node);
	int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_dma_configure = arm_smmu_v3_dma_configure,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_dma_configure = arm_smmu_dma_configure,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name = "arm-smmu-v3-pmcg",
	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};
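
/*
 * The three __initconst tables above act as a small dispatch table: for each
 * IORT node scanned at boot, iort_get_dev_cfg() below picks the matching
 * config, and iort_add_platform_device() only invokes the callbacks a given
 * node type actually provides (NULL members are simply skipped).
 */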

static __init const struct iort_dev_config *iort_get_dev_cfg(
			struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	case ACPI_IORT_NODE_PMCG:
		return &iort_arm_smmu_v3_pmcg_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity) {
		ret = ops->dev_set_proximity(&pdev->dev, node);
		if (ret)
			goto dev_put;
	}

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Platform devices based on PMCG nodes use platform_data to
	 * pass the hardware model info to the driver. For others, add
	 * a copy of IORT node pointer to platform_data to be used to
	 * retrieve IORT data information.
	 */
	if (ops->dev_add_platdata)
		ret = ops->dev_add_platdata(pdev);
	else
		ret = platform_device_add_data(pdev, &node, sizeof(node));

	if (ret)
		goto dev_put;

	fwnode = iort_get_fwnode(node);
	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	if (ops->dev_dma_configure)
		ops->dev_dma_configure(&pdev->dev, node);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	arch_teardown_dma_ops(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;

	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				acs_enabled = true;
				return;
			}
		}
	}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	/* iort_table will be used at runtime after the iort init,
	 * so we don't need to call acpi_put_table() to release
	 * the IORT table mapping.
	 */
	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}

#ifdef CONFIG_ZONE_DMA
/*
 * Extract the highest CPU physical address accessible to all DMA masters in
 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
 */
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
	phys_addr_t limit = PHYS_ADDR_MAX;
	struct acpi_iort_node *node, *end;
	struct acpi_table_iort *iort;
	acpi_status status;
	int i;

	if (acpi_disabled)
		return limit;

	status = acpi_get_table(ACPI_SIG_IORT, 0,
				(struct acpi_table_header **)&iort);
	if (ACPI_FAILURE(status))
		return limit;

	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);

	for (i = 0; i < iort->node_count; i++) {
		if (node >= end)
			break;

		switch (node->type) {
			struct acpi_iort_named_component *ncomp;
			struct acpi_iort_root_complex *rc;
			phys_addr_t local_limit;

		case ACPI_IORT_NODE_NAMED_COMPONENT:
			ncomp = (struct acpi_iort_named_component *)node->node_data;
			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;

		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
			if (node->revision < 1)
				break;

			rc = (struct acpi_iort_root_complex *)node->node_data;
			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;
		}
		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
	}
	acpi_put_table(&iort->header);
	return limit;
}
#endif