// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt) "GICv3: " fmt

#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/irqchip/arm-gic-v3.h>

struct mbi_range {
        u32             spi_start;
        u32             nr_spis;
        unsigned long   *bm;    /* bitmap of allocated SPIs in this range */
};

static DEFINE_MUTEX(mbi_lock);
static phys_addr_t              mbi_phys_base;
static struct mbi_range         *mbi_ranges;
static unsigned int             mbi_range_nr;

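/*
 * The MBI irq_chip is a pure proxy: mask/unmask/EOI/type/affinity are all
 * forwarded to the parent GIC domain, since an MBI is ultimately just an
 * ordinary SPI that happens to be raised by a memory write.
 */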
static struct irq_chip mbi_irq_chip = {
        .name                   = "MBI",
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_type           = irq_chip_set_type_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
};

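/*
 * Allocate the underlying SPI from the parent GIC domain. The 3-cell
 * fwspec below mirrors the DT encoding of a GIC interrupt: for example,
 * SPI 64 is expressed as <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>, since SPI
 * hwirq numbers start at 32.
 */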
static int mbi_irq_gic_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq,
                                    irq_hw_number_t hwirq)
{
        struct irq_fwspec fwspec;
        struct irq_data *d;
        int err;

        /*
         * Using ACPI? There is no MBI support in the spec, you
         * shouldn't even be here.
         */
        if (!is_of_node(domain->parent->fwnode))
                return -EINVAL;

        /*
         * Let's default to edge. This is consistent with traditional
         * MSIs, and systems requiring level signaling will just
         * enforce the trigger on their own.
         */
        fwspec.fwnode = domain->parent->fwnode;
        fwspec.param_count = 3;
        fwspec.param[0] = 0;
        fwspec.param[1] = hwirq - 32;
        fwspec.param[2] = IRQ_TYPE_EDGE_RISING;

        err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (err)
                return err;

        d = irq_domain_get_irq_data(domain->parent, virq);
        return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq,
                         int nr_irqs)
{
        mutex_lock(&mbi_lock);
        bitmap_release_region(mbi->bm, hwirq - mbi->spi_start,
                              get_count_order(nr_irqs));
        mutex_unlock(&mbi_lock);
}

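/*
 * Carve a naturally aligned power-of-two block of SPIs out of one of the
 * advertised MBI ranges. The power-of-two sizing (get_count_order) matches
 * what multi-MSI endpoints require of their vector block.
 */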
static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *args)
{
        msi_alloc_info_t *info = args;
        struct mbi_range *mbi = NULL;
        int hwirq, offset, i, err = 0;

        mutex_lock(&mbi_lock);
        for (i = 0; i < mbi_range_nr; i++) {
                offset = bitmap_find_free_region(mbi_ranges[i].bm,
                                                 mbi_ranges[i].nr_spis,
                                                 get_count_order(nr_irqs));
                if (offset >= 0) {
                        mbi = &mbi_ranges[i];
                        break;
                }
        }
        mutex_unlock(&mbi_lock);

        if (!mbi)
                return -ENOSPC;

        hwirq = mbi->spi_start + offset;

        err = iommu_dma_prepare_msi(info->desc,
                                    mbi_phys_base + GICD_SETSPI_NSR);
        if (err)
                return err;

        for (i = 0; i < nr_irqs; i++) {
                err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
                if (err)
                        goto fail;

                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &mbi_irq_chip, mbi);
        }

        return 0;

fail:
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
        mbi_free_msi(mbi, hwirq, nr_irqs);
        return err;
}

static void mbi_irq_domain_free(struct irq_domain *domain,
                                unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct mbi_range *mbi = irq_data_get_irq_chip_data(d);

        mbi_free_msi(mbi, d->hwirq, nr_irqs);
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mbi_domain_ops = {
        .alloc                  = mbi_irq_domain_alloc,
        .free                   = mbi_irq_domain_free,
};

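/*
 * An MBI is signalled by writing the SPI number to the distributor's
 * GICD_SETSPI_NSR doorbell, so the MSI message is simply that register's
 * address with the hwirq as payload.
 */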
static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        msg[0].address_hi = upper_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
        msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
        msg[0].data = data->parent_data->hwirq;

        iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

#ifdef CONFIG_PCI_MSI
/* PCI-specific irqchip */
static void mbi_mask_msi_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void mbi_unmask_msi_irq(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

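/*
 * Masking happens at both ends: pci_msi_mask_irq() masks the vector at the
 * endpoint's MSI/MSI-X capability, while the parent callback masks the
 * resulting SPI at the distributor.
 */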
static struct irq_chip mbi_msi_irq_chip = {
        .name                   = "MSI",
        .irq_mask               = mbi_mask_msi_irq,
        .irq_unmask             = mbi_unmask_msi_irq,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_compose_msi_msg    = mbi_compose_msi_msg,
        .irq_write_msi_msg      = pci_msi_domain_write_msg,
};

static struct msi_domain_info mbi_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip   = &mbi_msi_irq_chip,
};

static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
                                   struct irq_domain **pci_domain)
{
        *pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode,
                                                &mbi_msi_domain_info,
                                                nexus_domain);
        if (!*pci_domain)
                return -ENOMEM;

        return 0;
}
#else
static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
                                   struct irq_domain **pci_domain)
{
        *pci_domain = NULL;
        return 0;
}
#endif

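/*
 * Level-triggered MSIs use a pair of messages: msg[0] (composed above via
 * mbi_compose_msi_msg) raises the SPI through GICD_SETSPI_NSR, and msg[1]
 * lowers it again through GICD_CLRSPI_NSR.
 */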
static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
{
        mbi_compose_msi_msg(data, msg);

        msg[1].address_hi = upper_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
        msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
        msg[1].data = data->parent_data->hwirq;

        iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
}

/* Platform-MSI specific irqchip */
static struct irq_chip mbi_pmsi_irq_chip = {
        .name                   = "pMSI",
        .irq_set_type           = irq_chip_set_type_parent,
        .irq_compose_msi_msg    = mbi_compose_mbi_msg,
        .flags                  = IRQCHIP_SUPPORTS_LEVEL_MSI,
};

static struct msi_domain_ops mbi_pmsi_ops = {
};

static struct msi_domain_info mbi_pmsi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_LEVEL_CAPABLE),
        .ops    = &mbi_pmsi_ops,
        .chip   = &mbi_pmsi_irq_chip,
};

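/*
 * Three domains are stacked on top of the GIC: a nexus domain that owns
 * the SPI allocation, plus PCI/MSI and platform-MSI domains that feed
 * into it.
 */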
static int mbi_allocate_domains(struct irq_domain *parent)
{
        struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
        int err;

        nexus_domain = irq_domain_create_tree(parent->fwnode,
                                              &mbi_domain_ops, NULL);
        if (!nexus_domain)
                return -ENOMEM;

        irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
        nexus_domain->parent = parent;

        err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);

        plat_domain = platform_msi_create_irq_domain(parent->fwnode,
                                                     &mbi_pmsi_domain_info,
                                                     nexus_domain);

        if (err || !plat_domain) {
                if (plat_domain)
                        irq_domain_remove(plat_domain);
                if (pci_domain)
                        irq_domain_remove(pci_domain);
                irq_domain_remove(nexus_domain);
                return -ENOMEM;
        }

        return 0;
}

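/*
 * MBI support is described on the GICv3 node itself, via the standard
 * "msi-controller", "mbi-ranges" and optional "mbi-alias" properties.
 * An illustrative fragment (not taken from any real platform; cell counts
 * assume 2 address cells):
 *
 *      intc: interrupt-controller@2f000000 {
 *              compatible = "arm,gic-v3";
 *              ...
 *              msi-controller;
 *              mbi-ranges = <64 16>;           // first SPI, number of SPIs
 *              mbi-alias = <0x0 0x2f010000>;   // optional aliased MBI frame
 *      };
 */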
int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
{
        struct device_node *np;
        const __be32 *reg;
        int ret, n;

        np = to_of_node(fwnode);

        if (!of_property_read_bool(np, "msi-controller"))
                return 0;

        n = of_property_count_elems_of_size(np, "mbi-ranges", sizeof(u32));
        if (n <= 0 || n % 2)
                return -EINVAL;

        mbi_range_nr = n / 2;
        mbi_ranges = kcalloc(mbi_range_nr, sizeof(*mbi_ranges), GFP_KERNEL);
        if (!mbi_ranges)
                return -ENOMEM;

        for (n = 0; n < mbi_range_nr; n++) {
                ret = of_property_read_u32_index(np, "mbi-ranges", n * 2,
                                                 &mbi_ranges[n].spi_start);
                if (ret)
                        goto err_free_mbi;
                ret = of_property_read_u32_index(np, "mbi-ranges", n * 2 + 1,
                                                 &mbi_ranges[n].nr_spis);
                if (ret)
                        goto err_free_mbi;

                mbi_ranges[n].bm = kcalloc(BITS_TO_LONGS(mbi_ranges[n].nr_spis),
                                           sizeof(long), GFP_KERNEL);
                if (!mbi_ranges[n].bm) {
                        ret = -ENOMEM;
                        goto err_free_mbi;
                }
                pr_info("MBI range [%d:%d]\n", mbi_ranges[n].spi_start,
                        mbi_ranges[n].spi_start + mbi_ranges[n].nr_spis - 1);
        }

        reg = of_get_property(np, "mbi-alias", NULL);
        if (reg) {
                mbi_phys_base = of_translate_address(np, reg);
                if (mbi_phys_base == OF_BAD_ADDR) {
                        ret = -ENXIO;
                        goto err_free_mbi;
                }
        } else {
                struct resource res;

                if (of_address_to_resource(np, 0, &res)) {
                        ret = -ENXIO;
                        goto err_free_mbi;
                }

                mbi_phys_base = res.start;
        }

        pr_info("Using MBI frame %pa\n", &mbi_phys_base);

        ret = mbi_allocate_domains(parent);
        if (ret)
                goto err_free_mbi;

        return 0;

err_free_mbi:
        if (mbi_ranges) {
                for (n = 0; n < mbi_range_nr; n++)
                        kfree(mbi_ranges[n].bm);
                kfree(mbi_ranges);
        }

        return ret;
}