// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM GIC v2m MSI(-X) support
 * Support for Message Signaled Interrupts for systems that
 * implement ARM Generic Interrupt Controller: GICv2m.
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 *	    Brandon Anderson <brandon.anderson@amd.com>
 */

#define pr_fmt(fmt) "GICv2m: " fmt
#include <linux/acpi.h>
#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>
/*
 * MSI_TYPER:
 *     [25:16] lowest SPI assigned to MSI
 *     [9:0]   Number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER			0x008
#define V2M_MSI_TYPER_BASE_SHIFT	16
#define V2M_MSI_TYPER_BASE_MASK		0x3FF
#define V2M_MSI_TYPER_NUM_MASK		0x3FF
#define V2M_MSI_SETSPI_NS		0x040
#define V2M_MIN_SPI			32
#define V2M_MAX_SPI			1019
#define V2M_MSI_IIDR			0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)	\
	(((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)	((x) & V2M_MSI_TYPER_NUM_MASK)
/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR		0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
#define GICV2M_GRAVITON_ADDRESS_ONLY	0x00000002
static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);
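
/*
 * Per-frame bookkeeping: each GICv2m MSI frame discovered via DT or ACPI
 * gets one v2m_data entry on v2m_nodes, protected by v2m_lock.
 */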
struct v2m_data {
	struct list_head entry;
	struct fwnode_handle *fwnode;
	struct resource res;	/* GICv2m resource */
	void __iomem *base;	/* GICv2m virt address */
	u32 spi_start;		/* The SPI number that MSIs start */
	u32 nr_spis;		/* The number of SPIs for MSIs */
	u32 spi_offset;		/* offset to be subtracted from SPI number */
	unsigned long *bm;	/* MSI vector bitmap */
	u32 flags;		/* v2m flags for specific implementation */
};
static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
static struct irq_chip gicv2m_msi_irq_chip = {
	.name			= "MSI",
	.irq_mask		= gicv2m_mask_msi_irq,
	.irq_unmask		= gicv2m_unmask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};
static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &gicv2m_msi_irq_chip,
};
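
/*
 * MSI doorbell address: standard frames use the fixed SETSPI_NS register,
 * while the Graviton implementation encodes the target SPI in the address
 * itself (one 8-byte slot per SPI).
 */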
static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
{
	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		return v2m->res.start | ((hwirq - 32) << 3);

	return v2m->res.start + V2M_MSI_SETSPI_NS;
}
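
/*
 * Compose the MSI message for a given SPI: the address targets the doorbell
 * above and the payload carries the SPI number, adjusted by spi_offset on
 * implementations with the SPI-offset erratum.
 */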
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);

	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		msg->data = 0;
	else
		msg->data = data->hwirq;
	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_offset;

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};
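
/*
 * Allocate the backing SPI in the parent GIC domain. The parent fwspec is
 * three cells for a DT parent (GIC_SPI, hwirq - 32, trigger) and two cells
 * (hwirq, trigger) for an ACPI/fwnode parent.
 */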
static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}
static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
			       int nr_irqs)
{
	spin_lock(&v2m_lock);
	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
			      get_count_order(nr_irqs));
	spin_unlock(&v2m_lock);
}
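
/*
 * Allocate nr_irqs consecutive SPIs: scan the registered frames for a free
 * power-of-two aligned region in the per-frame bitmap, prepare the IOMMU MSI
 * mapping for the doorbell, then wire each virq to its SPI in the parent
 * GIC domain.
 */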
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct v2m_data *v2m = NULL, *tmp;
	int hwirq, offset, i, err = 0;

	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
						 get_count_order(nr_irqs));
		if (offset >= 0) {
			v2m = tmp;
			break;
		}
	}
	spin_unlock(&v2m_lock);

	if (!v2m)
		return -ENOSPC;

	hwirq = v2m->spi_start + offset;

	err = iommu_dma_prepare_msi(info->desc,
				    gicv2m_get_msi_addr(v2m, hwirq));
	if (err)
		return err;

	for (i = 0; i < nr_irqs; i++) {
		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			goto fail;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &gicv2m_irq_chip, v2m);
	}

	return 0;

fail:
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
	return err;
}
static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc			= gicv2m_irq_domain_alloc,
	.free			= gicv2m_irq_domain_free,
};
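
/*
 * MSIs may only target SPIs, i.e. interrupt IDs in the range
 * V2M_MIN_SPI (32) to V2M_MAX_SPI (1019).
 */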
static bool is_msi_spi_valid(u32 base, u32 num)
{
	if (base < V2M_MIN_SPI) {
		pr_err("Invalid MSI base SPI (base:%u)\n", base);
		return false;
	}

	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
		return false;
	}

	return true;
}
static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name			= "pMSI",
};

static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &gicv2m_pmsi_ops,
	.chip	= &gicv2m_pmsi_irq_chip,
};
static void gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		kfree(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}
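
/*
 * Build the domain hierarchy: a nexus domain stacked on the parent GIC
 * domain, with a PCI/MSI domain and a platform-MSI domain created on top
 * of it. All frames are served by the domains built around the first
 * registered frame's fwnode.
 */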
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_tree(v2m->fwnode,
					      &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}
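
/*
 * Register one GICv2m frame: map its MMIO window, determine the SPI range
 * (from explicit firmware properties or from MSI_TYPER), apply
 * implementation quirks keyed off MSI_IIDR, and allocate the SPI bitmap.
 */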
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
				  u32 spi_start, u32 nr_spis,
				  struct resource *res, u32 flags)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m) {
		pr_err("Failed to allocate struct v2m_data.\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&v2m->entry);
	v2m->fwnode = fwnode;
	v2m->flags = flags;

	memcpy(&v2m->res, res, sizeof(struct resource));

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}
	if (spi_start && nr_spis) {
		v2m->spi_start = spi_start;
		v2m->nr_spis = nr_spis;
	} else {
		u32 typer;

		/* Graviton should always have explicit spi_start/nr_spis */
		if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
			ret = -EINVAL;
			goto err_iounmap;
		}

		typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	/*
	 * The APM X-Gene GICv2m implementation has an erratum where
	 * the MSI data needs to be the offset from the spi_start
	 * in order to trigger the correct MSI interrupt. This is
	 * different from the standard GICv2m implementation where
	 * the MSI data is the absolute value within the range from
	 * spi_start to (spi_start + num_spis).
	 *
	 * The Broadcom NS2 GICv2m implementation has an erratum where
	 * the MSI data is 'spi_number - 32'.
	 *
	 * Reading the MSI_IIDR register fails on the Graviton
	 * implementation, so skip the quirk detection there.
	 */
	if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
		switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
		case XGENE_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = v2m->spi_start;
			break;
		case BCM_NS2_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = 32;
			break;
		}
	}

	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
			  GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);

	pr_info("range%pR, SPI[%d:%d]\n", res,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}
static struct of_device_id gicv2m_device_id[] = {
	{	.compatible	= "arm,gic-v2m-frame",	},
	{},
};
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		if (!of_find_property(child, "msi-controller", NULL))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
				      &res, 0);
		if (ret) {
			of_node_put(child);
			break;
		}
	}

	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}
#ifdef CONFIG_ACPI
static int acpi_num_msi;

static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
	struct v2m_data *data;

	if (WARN_ON(acpi_num_msi <= 0))
		return NULL;

	/* We only return the fwnode of the first MSI frame. */
	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!data)
		return NULL;

	return data->fwnode;
}
static bool acpi_check_amazon_graviton_quirks(void)
{
	static struct acpi_table_madt *madt;
	acpi_status status;
	bool rc;

#define ACPI_AMZN_OEM_ID	"AMAZON"

	status = acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt);

	if (ACPI_FAILURE(status) || !madt)
		return false;
	rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
	acpi_put_table((struct acpi_table_header *)madt);

	return rc;
}
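
/*
 * ACPI probing: one GENERIC_MSI_FRAME MADT entry describes each frame. On
 * Amazon Graviton the frame is 8K, address-only, and multi-MSI is not
 * supported, so the quirk adjusts the resource and the MSI domain flags.
 */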
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;
	u32 flags = 0;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

	if (acpi_check_amazon_graviton_quirks()) {
		pr_info("applying Amazon Graviton quirk\n");
		res.end = res.start + SZ_8K - 1;
		flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
		gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
	}

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	fwnode = irq_domain_alloc_fwnode(&res.start);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
					     acpi_parse_madt_msi, 0);

	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
#else /* CONFIG_ACPI */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
#endif /* CONFIG_ACPI */
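
/*
 * Common entry point, called by the GIC driver once the parent domain
 * exists: dispatch to the DT or ACPI probe path based on the fwnode type.
 */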
int __init gicv2m_init(struct fwnode_handle *parent_handle,
		       struct irq_domain *parent)
{
	if (is_of_node(parent_handle))
		return gicv2m_of_init(parent_handle, parent);

	return gicv2m_acpi_init(parent);
}
);