// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt) "GICv3: " fmt

#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/irqchip/arm-gic-v3.h>
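
/*
 * One contiguous block of SPIs set aside for MBI use. The layout below
 * is reconstructed from how the fields are used throughout this file:
 * spi_start and nr_spis come straight from the "mbi-ranges" DT
 * property, and bm is the bitmap tracking which SPIs of the range are
 * currently allocated.
 */
struct mbi_range {
	u32		spi_start;
	u32		nr_spis;
	unsigned long	*bm;
};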

static DEFINE_MUTEX(mbi_lock);
static phys_addr_t		mbi_phys_base;
static struct mbi_range		*mbi_ranges;
static unsigned int		mbi_range_nr;
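
/*
 * Nexus-level irqchip: it keeps no state of its own and simply proxies
 * every operation to the parent GIC domain, where the backing SPI lives.
 */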
static struct irq_chip mbi_irq_chip = {
	.name			= "MBI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};

static int mbi_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	/*
	 * Using ACPI? There is no MBI support in the spec, you
	 * shouldn't even be here.
	 */
	if (!is_of_node(domain->parent->fwnode))
		return -EINVAL;

	/*
	 * Let's default to edge. This is consistent with traditional
	 * MSIs, and systems requiring level signaling will just
	 * enforce the trigger on their own.
	 */
	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = 0;		/* Cell 0 of the GIC binding: GIC_SPI */
	fwspec.param[1] = hwirq - 32;	/* SPI numbers in the binding are offset by 32 */
	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	d = irq_domain_get_irq_data(domain->parent, virq);
	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq,
			 int nr_irqs)
{
	mutex_lock(&mbi_lock);
	bitmap_release_region(mbi->bm, hwirq - mbi->spi_start,
			      get_count_order(nr_irqs));
	mutex_unlock(&mbi_lock);
}

static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	struct mbi_range *mbi = NULL;
	int hwirq, offset, i, err = 0;

	/* Find a free, suitably aligned block of SPIs in one of the ranges */
	mutex_lock(&mbi_lock);
	for (i = 0; i < mbi_range_nr; i++) {
		offset = bitmap_find_free_region(mbi_ranges[i].bm,
						 mbi_ranges[i].nr_spis,
						 get_count_order(nr_irqs));
		if (offset >= 0) {
			mbi = &mbi_ranges[i];
			break;
		}
	}
	mutex_unlock(&mbi_lock);

	if (!mbi)
		return -ENOSPC;

	hwirq = mbi->spi_start + offset;

	for (i = 0; i < nr_irqs; i++) {
		err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			goto fail;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &mbi_irq_chip, mbi);
	}

	return 0;

fail:
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
	mbi_free_msi(mbi, hwirq, nr_irqs);
	return err;
}

static void mbi_irq_domain_free(struct irq_domain *domain,
				unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mbi_range *mbi = irq_data_get_irq_chip_data(d);

	mbi_free_msi(mbi, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mbi_domain_ops = {
	.alloc	= mbi_irq_domain_alloc,
	.free	= mbi_irq_domain_free,
};
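
/*
 * An MBI is raised by writing the SPI number to the distributor's
 * GICD_SETSPI_NSR doorbell (located at mbi_phys_base, possibly behind
 * an "mbi-alias" frame), so the composed MSI message is simply that
 * address/data pair.
 */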
static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	msg[0].address_hi = upper_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
	msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
	msg[0].data = data->parent_data->hwirq;

	iommu_dma_map_msi_msg(data->irq, msg);
}

#ifdef CONFIG_PCI_MSI
/* PCI-specific irqchip */
static void mbi_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void mbi_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip mbi_msi_irq_chip = {
	.name			= "MSI",
	.irq_mask		= mbi_mask_msi_irq,
	.irq_unmask		= mbi_unmask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_compose_msi_msg	= mbi_compose_msi_msg,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};

static struct msi_domain_info mbi_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &mbi_msi_irq_chip,
};

static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
				   struct irq_domain **pci_domain)
{
	*pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode,
						&mbi_msi_domain_info,
						nexus_domain);
	if (!*pci_domain)
		return -ENOMEM;

	return 0;
}
#else
static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
				   struct irq_domain **pci_domain)
{
	/* Stub when PCI/MSI is not configured */
	*pci_domain = NULL;
	return 0;
}
#endif

static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
{
	mbi_compose_msi_msg(data, msg);

	msg[1].address_hi = upper_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
	msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
	msg[1].data = data->parent_data->hwirq;

	iommu_dma_map_msi_msg(data->irq, &msg[1]);
}
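
/*
 * The two-message layout above follows the kernel's level-triggered MSI
 * convention: msg[0] asserts the SPI (GICD_SETSPI_NSR) and msg[1]
 * deasserts it (GICD_CLRSPI_NSR), which is what the
 * IRQCHIP_SUPPORTS_LEVEL_MSI flag below advertises.
 */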

/* Platform-MSI specific irqchip */
static struct irq_chip mbi_pmsi_irq_chip = {
	.name			= "pMSI",
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= mbi_compose_mbi_msg,
	.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI,
};

static struct msi_domain_ops mbi_pmsi_ops = {
};

static struct msi_domain_info mbi_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_LEVEL_CAPABLE),
	.ops	= &mbi_pmsi_ops,
	.chip	= &mbi_pmsi_irq_chip,
};
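
/*
 * Sketch of the resulting domain stack, for orientation:
 *
 *	GICv3 domain (parent)
 *	    `-- MBI "nexus" domain (mbi_domain_ops, DOMAIN_BUS_NEXUS)
 *	            |-- PCI/MSI domain (only with CONFIG_PCI_MSI)
 *	            `-- platform-MSI domain
 */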
static int mbi_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
	int err;

	nexus_domain = irq_domain_create_tree(parent->fwnode,
					      &mbi_domain_ops, NULL);
	if (!nexus_domain)
		return -ENOMEM;

	irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
	nexus_domain->parent = parent;

	err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);

	plat_domain = platform_msi_create_irq_domain(parent->fwnode,
						     &mbi_pmsi_domain_info,
						     nexus_domain);

	if (err || !plat_domain) {
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(nexus_domain);
		return -ENOMEM;
	}

	return 0;
}
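
/*
 * Illustrative device-tree fragment (values made up) of the shape this
 * init code expects: the GICv3 node must carry "msi-controller" and
 * list the SPI blocks reserved for MBIs as <first-spi nr-spis> pairs,
 * with an optional "mbi-alias" doorbell address:
 *
 *	gic: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		msi-controller;
 *		mbi-ranges = <256 64>;
 *		...
 *	};
 *
 * A client platform device would then point at the GIC with
 * "msi-parent = <&gic>;".
 */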
int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
{
	struct device_node *np;
	const __be32 *reg;
	int ret, n;

	np = to_of_node(fwnode);

	if (!of_property_read_bool(np, "msi-controller"))
		return 0;

	n = of_property_count_elems_of_size(np, "mbi-ranges", sizeof(u32));
	if (n <= 0 || n % 2)
		return -EINVAL;

	mbi_range_nr = n / 2;
	mbi_ranges = kcalloc(mbi_range_nr, sizeof(*mbi_ranges), GFP_KERNEL);
	if (!mbi_ranges)
		return -ENOMEM;

	for (n = 0; n < mbi_range_nr; n++) {
		ret = of_property_read_u32_index(np, "mbi-ranges", n * 2,
						 &mbi_ranges[n].spi_start);
		if (ret)
			goto err_free_mbi;
		ret = of_property_read_u32_index(np, "mbi-ranges", n * 2 + 1,
						 &mbi_ranges[n].nr_spis);
		if (ret)
			goto err_free_mbi;

		mbi_ranges[n].bm = kcalloc(BITS_TO_LONGS(mbi_ranges[n].nr_spis),
					   sizeof(long), GFP_KERNEL);
		if (!mbi_ranges[n].bm) {
			ret = -ENOMEM;
			goto err_free_mbi;
		}
		pr_info("MBI range [%d:%d]\n", mbi_ranges[n].spi_start,
			mbi_ranges[n].spi_start + mbi_ranges[n].nr_spis - 1);
	}

	reg = of_get_property(np, "mbi-alias", NULL);
	if (reg) {
		mbi_phys_base = of_translate_address(np, reg);
		if (mbi_phys_base == OF_BAD_ADDR) {
			ret = -ENXIO;
			goto err_free_mbi;
		}
	} else {
		struct resource res;

		if (of_address_to_resource(np, 0, &res)) {
			ret = -ENXIO;
			goto err_free_mbi;
		}

		mbi_phys_base = res.start;
	}

	pr_info("Using MBI frame %pa\n", &mbi_phys_base);

	ret = mbi_allocate_domains(parent);
	if (ret)
		goto err_free_mbi;

	return 0;

err_free_mbi:
	if (mbi_ranges) {
		for (n = 0; n < mbi_range_nr; n++)
			kfree(mbi_ranges[n].bm);
		kfree(mbi_ranges);
	}

	return ret;
}