// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))
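/*
 * Illustrative note (not part of the upstream file): the masks above turn
 * node-type enums into bit positions, so a single type_mask value can
 * select several node types at once, e.g.:
 *
 *	IORT_TYPE_MASK(ACPI_IORT_NODE_SMMU_V3) & IORT_IOMMU_TYPE   => non-zero
 *	IORT_TYPE_MASK(ACPI_IORT_NODE_ITS_GROUP) & IORT_IOMMU_TYPE => 0
 */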
struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};

static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);
/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}
typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);
/**
 * iort_register_domain_token() - register domain token along with related
 * ITS ID and base address to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}
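/*
 * Usage sketch (illustrative only, not upstream code): an ITS driver probe
 * path would pair the register/deregister calls above roughly as follows;
 * example_its_probe() and example_its_init() are hypothetical names.
 *
 *	static int example_its_probe(struct fwnode_handle *handle,
 *				     phys_addr_t its_base, int its_id)
 *	{
 *		int err = iort_register_domain_token(its_id, its_base, handle);
 *
 *		if (err)
 *			return err;
 *
 *		err = example_its_init(its_base);
 *		if (err)
 *			iort_deregister_domain_token(its_id);
 *		return err;
 *	}
 */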
/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev;
		struct acpi_iort_named_component *ncomp;
		struct device *nc_dev = dev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * IORT for a device matching a named component if
		 * the device does not have an ACPI companion to
		 * start from.
		 */
		do {
			adev = ACPI_COMPANION(nc_dev);
			if (adev)
				break;

			nc_dev = nc_dev->parent;
		} while (nc_dev);

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(nc_dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out, bool check_overlap)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in > map->input_base + map->id_count))
		return -ENXIO;

	if (check_overlap) {
		/*
		 * We already found a mapping for this input ID at the end of
		 * another region. If it coincides with the start of this
		 * region, we assume the prior match was due to the off-by-1
		 * issue mentioned below, and allow it to be superseded.
		 * Otherwise, things are *really* broken, and we just disregard
		 * duplicate matches entirely to retain compatibility.
		 */
		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
		       map, rid_in);
		if (rid_in != map->input_base)
			return -ENXIO;

		pr_err(FW_BUG "applying workaround.\n");
	}

	*rid_out = map->output_base + (rid_in - map->input_base);

	/*
	 * Due to confusion regarding the meaning of the id_count field (which
	 * carries the number of IDs *minus 1*), we may have to disregard this
	 * match if it is at the end of the range, and overlaps with the start
	 * of another one.
	 */
	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
		return -EAGAIN;

	return 0;
}
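/*
 * Worked example (illustrative): for a mapping with input_base = 0x0,
 * id_count = 0xff (0x100 IDs, since id_count holds the number of IDs
 * minus one) and output_base = 0x8000, rid_in = 0x42 yields
 * *rid_out = 0x8000 + (0x42 - 0x0) = 0x8042. rid_in = 0x100 lies past
 * input_base + id_count and is rejected with -ENXIO, while rid_in = 0xff
 * matches but returns -EAGAIN so that an overlapping region starting at
 * 0xff may supersede it.
 */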
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
		    node->type == ACPI_IORT_NODE_PMCG) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

#ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
#endif
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	struct acpi_iort_pmcg *pmcg;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * SMMUv3 dev ID mapping index was introduced in revision 1
		 * table, not available in revision 0
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * Until IORT E.e (node rev. 5), the ID mapping index was
		 * defined to be valid unless all interrupts are GSIV-based.
		 */
		if (node->revision < 5) {
			if (smmu->event_gsiv && smmu->pri_gsiv &&
			    smmu->gerr_gsiv && smmu->sync_gsiv)
				return -EINVAL;
		} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
			return -EINVAL;
		}

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	case ACPI_IORT_NODE_PMCG:
		pmcg = (struct acpi_iort_pmcg *)node->node_data;
		if (pmcg->overflow_gsiv || node->mapping_count == 0)
			return -EINVAL;

		return 0;
	default:
		return -EINVAL;
	}
}
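/*
 * Illustrative note (not upstream code): callers use the returned index to
 * skip the node's self-describing ID map entry during translation, e.g.:
 *
 *	index = iort_get_id_mapping_index(node);   // e.g. 0 for a PMCG
 *	for (i = 0; i < node->mapping_count; i++, map++) {
 *		if (i == index)
 *			continue;	// map[i] describes the node itself
 *		// translate through map[i] as usual
 *	}
 */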
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index, rc = 0;
		u32 out_ref = 0, map_id = id;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is special mapping index, skip it */
			if (i == index)
				continue;

			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
			if (!rc)
				break;
			if (rc == -EAGAIN)
				out_ref = map->output_reference;
		}

		if (i == node->mapping_count && !out_ref)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    rc ? out_ref : map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
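/*
 * Walk sketch (illustrative only): on a typical Named Component -> SMMUv3
 * -> ITS group topology, resolving the MSI parent of a named component
 * could look like this (nc_node and the IDs are hypothetical):
 *
 *	u32 dev_id;
 *	struct acpi_iort_node *its_group;
 *
 *	its_group = iort_node_map_id(nc_node, 0x0, &dev_id, IORT_MSI_TYPE);
 *
 * The first hop rewrites 0x0 into a StreamID at the SMMU node, the second
 * hop rewrites that StreamID into an ITS DeviceID and stops, because the
 * ITS group type matches IORT_MSI_TYPE.
 */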
static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: map the initial dev id if its parent is not
	 * the target type we want, map it again for the use cases such
	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * scan iort_fwnode_list to see if it's an iort platform
		 * device (such as an SMMU or PMCG); its iort node is already
		 * cached and associated with its fwnode when the iort
		 * platform devices were registered.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;
		/*
		 * if not, then it should be a platform device defined in
		 * DSDT/SSDT (with Named Component node in IORT)
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	pbus = to_pci_dev(dev)->bus;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}
/**
 * iort_msi_map_id() - Map a MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return input_id;

	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}
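/*
 * Usage sketch (illustrative only): the MSI layer resolves a PCI device's
 * Requester ID to its ITS DeviceID along these lines:
 *
 *	u32 rid = pci_dev_id(pdev);
 *	u32 dev_id = iort_msi_map_id(&pdev->dev, rid);
 *
 * When no IORT node or mapping exists, the input ID comes back unchanged.
 */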
/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a dev id was successfully found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}
/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
					  enum irq_domain_bus_token bus_token)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, bus_token);
}
static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}
#ifdef CONFIG_IOMMU_API
static void iort_rmr_free(struct device *dev,
			  struct iommu_resv_region *region)
{
	struct iommu_iort_rmr_data *rmr_data;

	rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
	kfree(rmr_data->sids);
	kfree(rmr_data);
}

static struct iommu_iort_rmr_data *iort_rmr_alloc(
					struct acpi_iort_rmr_desc *rmr_desc,
					int prot, enum iommu_resv_type type,
					u32 *sids, u32 num_sids)
{
	struct iommu_iort_rmr_data *rmr_data;
	struct iommu_resv_region *region;
	u32 *sids_copy;
	u64 addr = rmr_desc->base_address, size = rmr_desc->length;

	rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
	if (!rmr_data)
		return NULL;

	/* Create a copy of SIDs array to associate with this rmr_data */
	sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL);
	if (!sids_copy) {
		kfree(rmr_data);
		return NULL;
	}
	rmr_data->sids = sids_copy;
	rmr_data->num_sids = num_sids;

	if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
		/* PAGE align base addr and size */
		addr &= PAGE_MASK;
		size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));

		pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
		       rmr_desc->base_address,
		       rmr_desc->base_address + rmr_desc->length - 1,
		       addr, addr + size - 1);
	}

	region = &rmr_data->rr;
	INIT_LIST_HEAD(&region->list);
	region->start = addr;
	region->length = size;
	region->prot = prot;
	region->type = type;
	region->free = iort_rmr_free;

	return rmr_data;
}
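/*
 * Worked example (illustrative, assuming 4K pages): an RMR descriptor with
 * base_address = 0x10000800 and length = 0x1000 is not 64K aligned, so the
 * fixup above computes addr = 0x10000800 & PAGE_MASK = 0x10000000 and
 * size = PAGE_ALIGN(0x1000 + 0x800) = 0x2000, i.e. the reservation grows
 * to [0x10000000 - 0x10001fff] and the firmware bug is logged.
 */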
static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
					u32 count)
{
	int i, j;

	for (i = 0; i < count; i++) {
		u64 end, start = desc[i].base_address, length = desc[i].length;

		if (!length) {
			pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
			       start);
			continue;
		}

		end = start + length - 1;

		/* Check for address overlap */
		for (j = i + 1; j < count; j++) {
			u64 e_start = desc[j].base_address;
			u64 e_end = e_start + desc[j].length - 1;

			if (start <= e_end && end >= e_start)
				pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
				       start, end);
		}
	}
}

/*
 * Please note, we will keep the already allocated RMR reserve
 * regions in case of a memory allocation failure.
 */
static void iort_get_rmrs(struct acpi_iort_node *node,
			  struct acpi_iort_node *smmu,
			  u32 *sids, u32 num_sids,
			  struct list_head *head)
{
	struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
	struct acpi_iort_rmr_desc *rmr_desc;
	int i;

	rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
				rmr->rmr_offset);

	iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);

	for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
		struct iommu_iort_rmr_data *rmr_data;
		enum iommu_resv_type type;
		int prot = IOMMU_READ | IOMMU_WRITE;

		if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
			type = IOMMU_RESV_DIRECT_RELAXABLE;
		else
			type = IOMMU_RESV_DIRECT;

		if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
			prot |= IOMMU_PRIV;

		/* Attributes 0x00 - 0x03 represent device memory */
		if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
				ACPI_IORT_RMR_ATTR_DEVICE_GRE)
			prot |= IOMMU_MMIO;
		else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
				ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
			prot |= IOMMU_CACHE;

		rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
					  sids, num_sids);
		if (!rmr_data)
			continue;

		list_add_tail(&rmr_data->rr.list, head);
	}
}
static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
				u32 new_count)
{
	u32 *new_sids;
	u32 total_count = count + new_count;
	int i;

	new_sids = krealloc_array(sids, count + new_count,
				  sizeof(*new_sids), GFP_KERNEL);
	if (!new_sids)
		return NULL;

	for (i = count; i < total_count; i++)
		new_sids[i] = id_start++;

	return new_sids;
}

static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
			     u32 id_count)
{
	int i;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	/*
	 * Make sure the kernel has preserved the boot firmware PCIe
	 * configuration. This is required to ensure that the RMR PCIe
	 * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
	 */
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);

		if (!host->preserve_config)
			return false;
	}

	for (i = 0; i < fwspec->num_ids; i++) {
		if (fwspec->ids[i] >= id_start &&
		    fwspec->ids[i] <= id_start + id_count)
			return true;
	}

	return false;
}
static void iort_node_get_rmr_info(struct acpi_iort_node *node,
				   struct acpi_iort_node *iommu,
				   struct device *dev, struct list_head *head)
{
	struct acpi_iort_node *smmu = NULL;
	struct acpi_iort_rmr *rmr;
	struct acpi_iort_id_mapping *map;
	u32 *sids = NULL;
	u32 num_sids = 0;
	int i;

	if (!node->mapping_offset || !node->mapping_count) {
		pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
		       node);
		return;
	}

	rmr = (struct acpi_iort_rmr *)node->node_data;
	if (!rmr->rmr_offset || !rmr->rmr_count)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset);

	/*
	 * Go through the ID mappings and see if we have a match for SMMU
	 * and dev(if !NULL). If found, get the sids for the Node.
	 * Please note, id_count is equal to the number of IDs in the
	 * range minus one.
	 */
	for (i = 0; i < node->mapping_count; i++, map++) {
		struct acpi_iort_node *parent;

		parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				      map->output_reference);
		if (parent != iommu)
			continue;

		/* If dev is valid, check RMR node corresponds to the dev SID */
		if (dev && !iort_rmr_has_dev(dev, map->output_base,
					     map->id_count))
			continue;

		/* Retrieve SIDs associated with the Node. */
		sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
					   map->id_count + 1);
		if (!sids)
			return;

		num_sids += map->id_count + 1;
	}

	if (!sids)
		return;

	iort_get_rmrs(node, smmu, sids, num_sids, head);
	kfree(sids);
}
static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
			   struct list_head *head)
{
	struct acpi_table_iort *iort;
	struct acpi_iort_node *iort_node, *iort_end;
	int i;

	/* Only supports ARM DEN 0049E.d onwards */
	if (iort_table->revision < 5)
		return;

	iort = (struct acpi_table_iort *)iort_table;

	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return;

		if (iort_node->type == ACPI_IORT_NODE_RMR)
			iort_node_get_rmr_info(iort_node, iommu, dev, head);

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

/*
 * Populate the RMR list associated with a given IOMMU and dev(if provided).
 * If dev is NULL, the function populates all the RMRs associated with the
 * given IOMMU.
 */
static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
					    struct device *dev,
					    struct list_head *head)
{
	struct acpi_iort_node *iommu;

	iommu = iort_get_iort_node(iommu_fwnode);
	if (!iommu)
		return;

	iort_find_rmrs(iommu, dev, head);
}
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

/*
 * Retrieve platform specific HW MSI reserve regions.
 * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
 * associated with the device are the HW MSI reserved regions.
 */
static void iort_iommu_msi_get_resv_regions(struct device *dev,
					    struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return;

	/*
	 * Current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					fwspec->ids[i],
					NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI,
							 GFP_KERNEL);
			if (region)
				list_add_tail(&region->list, head);
		}
	}
}
/**
 * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 */
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iort_iommu_msi_get_resv_regions(dev, head);
	iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
}

/**
 * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
 *                     associated StreamIDs information.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
		       struct list_head *head)
{
	iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
}
EXPORT_SYMBOL_GPL(iort_get_rmr_sids);

/**
 * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
		       struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		entry->free(NULL, entry);
}
EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_ENABLED(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
{
	struct acpi_iort_memory_access *memory_access;
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	memory_access =
		(struct acpi_iort_memory_access *)&pci_rc->memory_properties;
	return memory_access->memory_flags & ACPI_IORT_MF_CANWBS;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	struct fwnode_handle *iort_fwnode;

	/* If there's no SMMU driver at all, give up now */
	if (!node || !iort_iommu_driver_enabled(node->type))
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the SMMU drivers are enabled but not loaded/probed
	 * yet, this will defer.
	 */
	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};
static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
				      struct acpi_iort_node *node)
{
	struct property_entry props[3] = {};
	struct acpi_iort_named_component *nc;

	nc = (struct acpi_iort_named_component *)node->node_data;
	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
						nc->node_flags));
	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");

	if (device_create_managed_software_node(dev, props, NULL))
		dev_warn(dev, "Could not add device properties\n");
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
	struct acpi_iort_node *parent;
	int err = -ENODEV, i = 0;
	u32 streamid = 0;

	do {
		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE,
						   i++);

		if (parent)
			err = iort_iommu_xlate(dev, parent, streamid);
	} while (parent && !err);

	return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
				struct acpi_iort_node *node,
				const u32 *in_id)
{
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
	if (parent)
		return iort_iommu_xlate(dev, parent, streamid);

	return -ENODEV;
}
/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: 0 on success, <0 on failure
 */
int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
	struct acpi_iort_node *node;
	int err = -ENODEV;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return -ENODEV;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);

		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && iort_pci_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
		if (fwspec && iort_pci_rc_supports_canwbs(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS;
	} else {
		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return -ENODEV;

		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
			      iort_nc_iommu_map(dev, node);

		if (!err)
			iort_named_component_init(dev, node);
	}

	return err;
}

#else
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
static int nc_dma_get_range(struct device *dev, u64 *limit)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	if (!ncomp->memory_address_limit) {
		pr_warn(FW_BUG "Named component missing memory address limit\n");
		return -EINVAL;
	}

	*limit = ncomp->memory_address_limit >= 64 ? U64_MAX :
			(1ULL << ncomp->memory_address_limit) - 1;

	return 0;
}

static int rc_dma_get_range(struct device *dev, u64 *limit)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;
	struct pci_bus *pbus = to_pci_dev(dev)->bus;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	if (!rc->memory_address_limit) {
		pr_warn(FW_BUG "Root complex missing memory address limit\n");
		return -EINVAL;
	}

	*limit = rc->memory_address_limit >= 64 ? U64_MAX :
			(1ULL << rc->memory_address_limit) - 1;

	return 0;
}

/**
 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
 * @dev: device to lookup
 * @limit: DMA limit result pointer
 *
 * Return: 0 on success, an error otherwise.
 */
int iort_dma_get_ranges(struct device *dev, u64 *limit)
{
	if (dev_is_pci(dev))
		return rc_dma_get_range(dev, limit);
	else
		return nc_dma_get_range(dev, limit);
}
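/*
 * Worked example (illustrative): a root complex advertising
 * memory_address_limit = 32 yields *limit = (1ULL << 32) - 1 = 0xffffffff,
 * i.e. a 32-bit DMA limit. Values >= 64 would shift out of range, hence
 * the explicit U64_MAX case above.
 */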
static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Cavium ThunderX2 implementation doesn't support unique irq
	 * lines. Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size, for Cavium ThunderX2 implementation
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}
static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}
#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
					    struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
	return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}
static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQ */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static void __init arm_smmu_dma_configure(struct device *dev,
					  struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for SMMU set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}
static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	/*
	 * There are always 2 memory resources.
	 * If the overflow_gsiv is present then add that for a total of 3.
	 */
	return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
						   struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	res[0].start = pmcg->page0_base_address;
	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
	res[0].flags = IORESOURCE_MEM;
	/*
	 * The initial version in DEN0049C lacked a way to describe register
	 * page 1, which makes it broken for most PMCG implementations; in
	 * that case, just let the driver fail gracefully if it expects to
	 * find a second memory resource.
	 */
	if (node->revision > 0) {
		res[1].start = pmcg->page1_base_address;
		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
		res[1].flags = IORESOURCE_MEM;
	}

	if (pmcg->overflow_gsiv)
		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
				       ACPI_EDGE_SENSITIVE, &res[2]);
}
static struct acpi_platform_list pmcg_plat_info[] __initdata = {
	/* HiSilicon Hip08 Platform */
	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
	/* HiSilicon Hip09 Platform */
	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
	/* HiSilicon Hip10/11 platforms use the same SMMU IP as Hip09 */
	{"HISI  ", "HIP10   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
	{"HISI  ", "HIP10C  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
	{"HISI  ", "HIP11   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
	{ }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
	u32 model;
	int idx;

	idx = acpi_match_platform_list(pmcg_plat_info);
	if (idx >= 0)
		model = pmcg_plat_info[idx].data;
	else
		model = IORT_SMMU_V3_PMCG_GENERIC;

	return platform_device_add_data(pdev, &model, sizeof(model));
}
struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	void (*dev_dma_configure)(struct device *dev,
				  struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				   struct acpi_iort_node *node);
	int (*dev_set_proximity)(struct device *dev,
				 struct acpi_iort_node *node);
	int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_dma_configure = arm_smmu_v3_dma_configure,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_dma_configure = arm_smmu_dma_configure,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name = "arm-smmu-v3-pmcg",
	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
			struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	case ACPI_IORT_NODE_PMCG:
		return &iort_arm_smmu_v3_pmcg_cfg;
	default:
		return NULL;
	}
}
/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity) {
		ret = ops->dev_set_proximity(&pdev->dev, node);
		if (ret)
			goto dev_put;
	}

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Platform devices based on PMCG nodes use platform_data to
	 * pass the hardware model info to the driver. For others, add
	 * a copy of IORT node pointer to platform_data to be used to
	 * retrieve IORT data information.
	 */
	if (ops->dev_add_platdata)
		ret = ops->dev_add_platdata(pdev);
	else
		ret = platform_device_add_data(pdev, &node, sizeof(node));

	if (ret)
		goto dev_put;

	fwnode = iort_get_fwnode(node);
	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	if (ops->dev_dma_configure)
		ops->dev_dma_configure(&pdev->dev, node);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	arch_teardown_dma_ops(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}
#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;

	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table,  map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				acs_enabled = true;
				return;
			}
		}
	}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif
static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	/* iort_table will be used at runtime after the iort init,
	 * so we don't need to call acpi_put_table() to release
	 * the IORT table mapping.
	 */
	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}
#ifdef CONFIG_ZONE_DMA
/*
 * Extract the highest CPU physical address accessible to all DMA masters in
 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
 */
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
	phys_addr_t limit = PHYS_ADDR_MAX;
	struct acpi_iort_node *node, *end;
	struct acpi_table_iort *iort;
	acpi_status status;
	int i;

	if (acpi_disabled)
		return limit;

	status = acpi_get_table(ACPI_SIG_IORT, 0,
				(struct acpi_table_header **)&iort);
	if (ACPI_FAILURE(status))
		return limit;

	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);

	for (i = 0; i < iort->node_count; i++) {
		if (node >= end)
			break;

		switch (node->type) {
			struct acpi_iort_named_component *ncomp;
			struct acpi_iort_root_complex *rc;
			phys_addr_t local_limit;

		case ACPI_IORT_NODE_NAMED_COMPONENT:
			ncomp = (struct acpi_iort_named_component *)node->node_data;
			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;

		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
			if (node->revision < 1)
				break;

			rc = (struct acpi_iort_root_complex *)node->node_data;
			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;
		}
		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
	}
	acpi_put_table(&iort->header);
	return limit;
}
#endif