/*
 * ARM GIC v2m MSI(-X) support
 * Support for Message Signaled Interrupts for systems that
 * implement ARM Generic Interrupt Controller: GICv2m.
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 *          Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 *          Brandon Anderson <brandon.anderson@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#define pr_fmt(fmt) "GICv2m: " fmt

#include <linux/acpi.h>
#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>
/*
 * MSI_TYPER:
 *     [31:26] Reserved
 *     [25:16] lowest SPI assigned to MSI
 *     [15:10] Reserved
 *     [9:0]   Number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER                  0x008
#define V2M_MSI_TYPER_BASE_SHIFT       16
#define V2M_MSI_TYPER_BASE_MASK        0x3FF
#define V2M_MSI_TYPER_NUM_MASK         0x3FF
#define V2M_MSI_SETSPI_NS              0x040
#define V2M_MIN_SPI                    32
#define V2M_MAX_SPI                    1019
#define V2M_MSI_IIDR                   0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)      \
        (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)
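/*
 * Worked example (illustrative register value, not from any particular SoC):
 * an MSI_TYPER readout of 0x00400020 decodes as
 *     V2M_MSI_TYPER_BASE_SPI(0x00400020) = (0x00400020 >> 16) & 0x3FF = 64
 *     V2M_MSI_TYPER_NUM_SPI(0x00400020)  =  0x00400020        & 0x3FF = 32
 * i.e. the frame owns SPIs 64..95.
 */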
/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR          0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR        0x0000013f

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET        0x00000001

static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);
struct v2m_data {
        struct list_head entry;
        struct fwnode_handle *fwnode;
        struct resource res;    /* GICv2m resource */
        void __iomem *base;     /* GICv2m virt address */
        u32 spi_start;          /* The SPI number that MSIs start */
        u32 nr_spis;            /* The number of SPIs for MSIs */
        u32 spi_offset;         /* offset to be subtracted from SPI number */
        unsigned long *bm;      /* MSI vector bitmap */
        u32 flags;              /* v2m flags for specific implementation */
};
static void gicv2m_mask_msi_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

static struct irq_chip gicv2m_msi_irq_chip = {
        .name                   = "MSI",
        .irq_mask               = gicv2m_mask_msi_irq,
        .irq_unmask             = gicv2m_unmask_msi_irq,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_write_msi_msg      = pci_msi_domain_write_msg,
};

static struct msi_domain_info gicv2m_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip   = &gicv2m_msi_irq_chip,
};
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
        phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;

        msg->address_hi = upper_32_bits(addr);
        msg->address_lo = lower_32_bits(addr);
        msg->data = data->hwirq;

        if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
                msg->data -= v2m->spi_offset;

        iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
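/*
 * Note on the message composed above: the endpoint raises the interrupt by
 * doing a 32-bit write of msg->data to the frame's MSI_SETSPI_NS doorbell
 * (frame base + 0x040), which the v2m frame forwards as the corresponding
 * SPI to the GIC. Illustrative example (address and SPI made up): a frame
 * at 0x08020000 serving SPI 70 yields address_lo = 0x08020040,
 * address_hi = 0 and data = 70 on a spec-compliant implementation.
 */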
static struct irq_chip gicv2m_irq_chip = {
        .name                   = "GICv2m",
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
        .irq_compose_msi_msg    = gicv2m_compose_msi_msg,
};
static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
                                       unsigned int virq,
                                       irq_hw_number_t hwirq)
{
        struct irq_fwspec fwspec;
        struct irq_data *d;
        int err;

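        /*
         * Build a parent fwspec in whichever format the parent GIC domain
         * expects: a DT parent uses the three-cell GIC binding (cell 0 = 0
         * for SPI, cell 1 = SPI number relative to 32, cell 2 = trigger),
         * while an ACPI/fwnode parent takes the absolute hwirq plus the
         * trigger type.
         */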
        if (is_of_node(domain->parent->fwnode)) {
                fwspec.fwnode = domain->parent->fwnode;
                fwspec.param_count = 3;
                fwspec.param[0] = 0;
                fwspec.param[1] = hwirq - 32;
                fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
        } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
                fwspec.fwnode = domain->parent->fwnode;
                fwspec.param_count = 2;
                fwspec.param[0] = hwirq;
                fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
        } else {
                return -EINVAL;
        }

        err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (err)
                return err;

        /* Configure the interrupt line to be edge */
        d = irq_domain_get_irq_data(domain->parent, virq);
        d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
        return 0;
}
static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
                               int nr_irqs)
{
        spin_lock(&v2m_lock);
        bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
                              get_count_order(nr_irqs));
        spin_unlock(&v2m_lock);
}
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                   unsigned int nr_irqs, void *args)
{
        msi_alloc_info_t *info = args;
        struct v2m_data *v2m = NULL, *tmp;
        int hwirq, offset, i, err = 0;

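        /*
         * Walk the registered frames and grab a contiguous, naturally
         * aligned, power-of-two sized block of SPIs (get_count_order()
         * rounds nr_irqs up). That is what multi-message PCI MSI needs,
         * since the device encodes the vector index in the low bits of
         * the MSI data.
         */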
        spin_lock(&v2m_lock);
        list_for_each_entry(tmp, &v2m_nodes, entry) {
                offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
                                                 get_count_order(nr_irqs));
                if (offset >= 0) {
                        v2m = tmp;
                        break;
                }
        }
        spin_unlock(&v2m_lock);

        if (!v2m)
                return -ENOSPC;

        hwirq = v2m->spi_start + offset;

        err = iommu_dma_prepare_msi(info->desc,
                                    v2m->res.start + V2M_MSI_SETSPI_NS);
        if (err)
                return err;

        for (i = 0; i < nr_irqs; i++) {
                err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
                if (err)
                        goto fail;

                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &gicv2m_irq_chip, v2m);
        }

        return 0;

fail:
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
        gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
        return err;
}
static void gicv2m_irq_domain_free(struct irq_domain *domain,
                                   unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

        gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops gicv2m_domain_ops = {
        .alloc                  = gicv2m_irq_domain_alloc,
        .free                   = gicv2m_irq_domain_free,
};
static bool is_msi_spi_valid(u32 base, u32 num)
{
        if (base < V2M_MIN_SPI) {
                pr_err("Invalid MSI base SPI (base:%u)\n", base);
                return false;
        }

        if ((num == 0) || (base + num > V2M_MAX_SPI)) {
                pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
                       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
                return false;
        }

        return true;
}
static struct irq_chip gicv2m_pmsi_irq_chip = {
        .name                   = "pMSI",
};

static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
        .ops    = &gicv2m_pmsi_ops,
        .chip   = &gicv2m_pmsi_irq_chip,
};
static void gicv2m_teardown(void)
{
        struct v2m_data *v2m, *tmp;

        list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
                list_del(&v2m->entry);
                kfree(v2m->bm);
                iounmap(v2m->base);
                of_node_put(to_of_node(v2m->fwnode));
                if (is_fwnode_irqchip(v2m->fwnode))
                        irq_domain_free_fwnode(v2m->fwnode);
                kfree(v2m);
        }
}
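/*
 * Domain layout set up below: the first registered v2m frame gets an inner
 * "nexus" domain stacked on the parent GIC domain, and both a PCI MSI domain
 * and a platform MSI domain are created on top of that nexus, so the stack
 * is GIC <- v2m <- {PCI MSI, platform MSI}.
 */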
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
        struct irq_domain *inner_domain, *pci_domain, *plat_domain;
        struct v2m_data *v2m;

        v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
        if (!v2m)
                return 0;

        inner_domain = irq_domain_create_tree(v2m->fwnode,
                                              &gicv2m_domain_ops, v2m);
        if (!inner_domain) {
                pr_err("Failed to create GICv2m domain\n");
                return -ENOMEM;
        }

        irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
        inner_domain->parent = parent;
        pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
                                               &gicv2m_msi_domain_info,
                                               inner_domain);
        plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
                                                     &gicv2m_pmsi_domain_info,
                                                     inner_domain);
        if (!pci_domain || !plat_domain) {
                pr_err("Failed to create MSI domains\n");
                if (plat_domain)
                        irq_domain_remove(plat_domain);
                if (pci_domain)
                        irq_domain_remove(pci_domain);
                irq_domain_remove(inner_domain);
                return -ENOMEM;
        }

        return 0;
}
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
                                  u32 spi_start, u32 nr_spis,
                                  struct resource *res)
{
        int ret;
        struct v2m_data *v2m;

        v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
        if (!v2m) {
                pr_err("Failed to allocate struct v2m_data.\n");
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&v2m->entry);
        v2m->fwnode = fwnode;

        memcpy(&v2m->res, res, sizeof(struct resource));

        v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
        if (!v2m->base) {
                pr_err("Failed to map GICv2m resource\n");
                ret = -ENOMEM;
                goto err_free_v2m;
        }

        if (spi_start && nr_spis) {
                v2m->spi_start = spi_start;
                v2m->nr_spis = nr_spis;
        } else {
                u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

                v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
                v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
        }

        if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
                ret = -EINVAL;
                goto err_iounmap;
        }

        /*
         * APM X-Gene GICv2m implementation has an erratum where
         * the MSI data needs to be the offset from the spi_start
         * in order to trigger the correct MSI interrupt. This is
         * different from the standard GICv2m implementation where
         * the MSI data is the absolute value within the range from
         * spi_start to (spi_start + num_spis).
         *
         * Broadcom NS2 GICv2m implementation has an erratum where the MSI
         * data is 'spi_number - 32'.
         */
        switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
        case XGENE_GICV2M_MSI_IIDR:
                v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
                v2m->spi_offset = v2m->spi_start;
                break;
        case BCM_NS2_GICV2M_MSI_IIDR:
                v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
                v2m->spi_offset = 32;
                break;
        }

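        /*
         * Worked example with made-up numbers: for a frame with
         * spi_start = 64, SPI 70 is signalled with MSI data 70 on a
         * compliant frame, data 6 (70 - spi_start) on X-Gene, and
         * data 38 (70 - 32) on Broadcom NS2; gicv2m_compose_msi_msg()
         * applies the subtraction via v2m->spi_offset.
         */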
        v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
                          GFP_KERNEL);
        if (!v2m->bm) {
                ret = -ENOMEM;
                goto err_iounmap;
        }

        list_add_tail(&v2m->entry, &v2m_nodes);

        pr_info("range%pR, SPI[%d:%d]\n", res,
                v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
        return 0;

err_iounmap:
        iounmap(v2m->base);
err_free_v2m:
        kfree(v2m);
        return ret;
}
static struct of_device_id gicv2m_device_id[] = {
        { .compatible = "arm,gic-v2m-frame", },
        {},
};
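/*
 * Illustrative device-tree fragment (node name, addresses and values are
 * invented) of the kind of child node gicv2m_of_init() below looks for
 * underneath the GIC node:
 *
 *      v2m@80000 {
 *              compatible = "arm,gic-v2m-frame";
 *              msi-controller;
 *              reg = <0x80000 0x1000>;
 *              arm,msi-base-spi = <64>;        (optional MSI_TYPER override)
 *              arm,msi-num-spis = <32>;        (optional MSI_TYPER override)
 *      };
 */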
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
                                 struct irq_domain *parent)
{
        int ret = 0;
        struct device_node *node = to_of_node(parent_handle);
        struct device_node *child;

        for (child = of_find_matching_node(node, gicv2m_device_id); child;
             child = of_find_matching_node(child, gicv2m_device_id)) {
                u32 spi_start = 0, nr_spis = 0;
                struct resource res;

                if (!of_find_property(child, "msi-controller", NULL))
                        continue;

                ret = of_address_to_resource(child, 0, &res);
                if (ret) {
                        pr_err("Failed to allocate v2m resource.\n");
                        break;
                }

                if (!of_property_read_u32(child, "arm,msi-base-spi",
                                          &spi_start) &&
                    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
                        pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
                                spi_start, nr_spis);

                ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
                if (ret) {
                        of_node_put(child);
                        break;
                }
        }

        if (!ret)
                ret = gicv2m_allocate_domains(parent);
        if (ret)
                gicv2m_teardown();
        return ret;
}
#ifdef CONFIG_ACPI
static int acpi_num_msi;

static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
        struct v2m_data *data;

        if (WARN_ON(acpi_num_msi <= 0))
                return NULL;

        /* We only return the fwnode of the first MSI frame. */
        data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
        if (!data)
                return NULL;

        return data->fwnode;
}
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
                    const unsigned long end)
{
        int ret;
        struct resource res;
        u32 spi_start = 0, nr_spis = 0;
        struct acpi_madt_generic_msi_frame *m;
        struct fwnode_handle *fwnode;

        m = (struct acpi_madt_generic_msi_frame *)header;
        if (BAD_MADT_ENTRY(m, end))
                return -EINVAL;

        res.start = m->base_address;
        res.end = m->base_address + SZ_4K - 1;
        res.flags = IORESOURCE_MEM;

        if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
                spi_start = m->spi_base;
                nr_spis = m->spi_count;

                pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
                        spi_start, nr_spis);
        }

        fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
        if (!fwnode) {
                pr_err("Unable to allocate GICv2m domain token\n");
                return -EINVAL;
        }

        ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
        if (ret)
                irq_domain_free_fwnode(fwnode);

        return ret;
}
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
        int ret;

        if (acpi_num_msi > 0)
                return 0;

        acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
                                             acpi_parse_madt_msi, 0);

        if (acpi_num_msi <= 0)
                goto err_out;

        ret = gicv2m_allocate_domains(parent);
        if (ret)
                goto err_out;

        pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

        return 0;

err_out:
        gicv2m_teardown();
        return -EINVAL;
}
#else /* CONFIG_ACPI */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
        return -EINVAL;
}
#endif /* CONFIG_ACPI */
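/*
 * Entry point, called from the GICv2 driver once the parent GIC IRQ domain
 * exists; the type of fwnode decides whether the v2m frames are discovered
 * from DT child nodes or from MADT GENERIC_MSI_FRAME entries.
 */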
int __init gicv2m_init(struct fwnode_handle *parent_handle,
                       struct irq_domain *parent)
{
        if (is_of_node(parent_handle))
                return gicv2m_of_init(parent_handle, parent);

        return gicv2m_acpi_init(parent);
}